xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision 27d564647e9b50e713c60b0d7e5ea2a9b0a3ae74)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <htt.h>
20 #include <hal_hw_headers.h>
21 #include <hal_api.h>
22 #include "dp_htt.h"
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_rx_mon.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
30 #include "cdp_txrx_cmn_struct.h"
31 
32 #ifdef FEATURE_PERPKT_INFO
33 #include "dp_ratetable.h"
34 #endif
35 
36 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
37 
38 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
39 #define HTT_T2H_MAX_MSG_SIZE 2048
40 
41 #define HTT_MSG_BUF_SIZE(msg_bytes) \
42 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
43 
44 #define HTT_PID_BIT_MASK 0x3
45 
46 #define DP_EXT_MSG_LENGTH 2048
47 #define DP_HTT_SEND_HTC_PKT(soc, pkt)                            \
48 do {                                                             \
49 	if (htc_send_pkt(soc->htc_soc, &pkt->htc_pkt) ==         \
50 					QDF_STATUS_SUCCESS)      \
51 		htt_htc_misc_pkt_list_add(soc, pkt);             \
52 } while (0)
53 
54 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
55 
/*
 * HTT_PPDU_DEFAULT_TLV_BITMAP - HTT PPDU TLV types requested in Default mode
 *
 * Fully parenthesized so the expansion can be embedded safely in larger
 * expressions (e.g. masked with '&' or compared with '==') without
 * operator-precedence surprises.
 */
#define HTT_PPDU_DEFAULT_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))

/*
 * HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP - HTT PPDU TLV types for Sniffer mode
 *
 * Superset of the default bitmap that additionally requests the BA and
 * MPDU-enqueue bitmap TLVs needed for AMPDU sniffing.
 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \
	 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV))
81 #define HTT_FRAMECTRL_DATATYPE 0x08
82 #define HTT_PPDU_DESC_MAX_DEPTH 16
83 #define DP_SCAN_PEER_ID 0xFFFF
84 
/*
 * dp_tx_rate_stats_update() - Update per-peer and per-vdev TX rate statistics
 * @peer: Datapath peer handle
 * @ppdu: per-user PPDU completion descriptor
 *
 * Return: None
 */
94 #ifdef FEATURE_PERPKT_INFO
95 static inline void
96 dp_tx_rate_stats_update(struct dp_peer *peer,
97 			struct cdp_tx_completion_ppdu_user *ppdu)
98 {
99 	uint32_t ratekbps = 0;
100 	uint32_t ppdu_tx_rate = 0;
101 
102 	if (!peer || !ppdu)
103 		return;
104 
105 
106 	ratekbps = dp_getrateindex(ppdu->gi,
107 				   ppdu->mcs,
108 				   ppdu->nss,
109 				   ppdu->preamble,
110 				   ppdu->bw);
111 
112 	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);
113 
114 	if (!ratekbps)
115 		return;
116 
117 	dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
118 	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
119 	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
120 
121 	if (peer->vdev) {
122 		if (peer->bss_peer) {
123 			peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
124 			peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
125 		} else {
126 			peer->vdev->stats.tx.last_tx_rate = ratekbps;
127 			peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
128 		}
129 	}
130 }
131 
/*
 * dp_tx_stats_update() - Update per-peer TX statistics from a PPDU completion
 * @soc: Datapath soc handle
 * @peer: Datapath peer handle
 * @ppdu: per-user PPDU completion descriptor
 * @ack_rssi: RSSI of last ack received
 *
 * NOTE(review): assumes ppdu->mcs, ppdu->gi, ppdu->bw, ppdu->nss and
 * ppdu->preamble are within the bounds of the corresponding stats arrays -
 * confirm against the PPDU TLV parser.
 *
 * Return: None
 */
static void dp_tx_stats_update(struct dp_soc *soc, struct dp_peer *peer,
		struct cdp_tx_completion_ppdu_user *ppdu, uint32_t ack_rssi)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	uint8_t preamble, mcs;
	uint16_t num_msdu;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;
	num_msdu = ppdu->num_msdu;

	/* If the peer statistics are already processed as part of
	 * per-MSDU completion handler, do not process these again in per-PPDU
	 * indications */
	if (soc->process_tx_status)
		return;

	DP_STATS_INC_PKT(peer, tx.comp_pkt,
			num_msdu, (ppdu->success_bytes +
				ppdu->retry_bytes + ppdu->failed_bytes));
	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], num_msdu);
	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
	/* ack RSSI is only meaningful for unicast PPDUs with a valid ack */
	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);

	DP_STATS_INC(peer, tx.retries,
			(ppdu->long_retries + ppdu->short_retries));
	/* Per-preamble MCS histogram: for each preamble type, an in-range
	 * MCS bumps its own bucket while an out-of-range MCS is folded into
	 * the overflow bucket (MAX_MCS-1).
	 */
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));

	dp_peer_stats_notify(peer);

	/* last_per: percentage of unicast MSDUs that failed to complete
	 * successfully, derived from the cumulative counters
	 */
	if (peer->stats.tx.ucast.num)
		peer->stats.tx.last_per = ((peer->stats.tx.ucast.num -
					peer->stats.tx.tx_success.num) * 100) /
					peer->stats.tx.ucast.num;

	/* push the refreshed peer stats to the control plane, if registered */
	if (soc->cdp_soc.ol_ops->update_dp_stats) {
		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
				&peer->stats, ppdu->peer_id,
				UPDATE_PEER_STATS);

	}
}
210 #endif
211 
212 /*
213  * htt_htc_pkt_alloc() - Allocate HTC packet buffer
214  * @htt_soc:	HTT SOC handle
215  *
216  * Return: Pointer to htc packet buffer
217  */
218 static struct dp_htt_htc_pkt *
219 htt_htc_pkt_alloc(struct htt_soc *soc)
220 {
221 	struct dp_htt_htc_pkt_union *pkt = NULL;
222 
223 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
224 	if (soc->htt_htc_pkt_freelist) {
225 		pkt = soc->htt_htc_pkt_freelist;
226 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
227 	}
228 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
229 
230 	if (pkt == NULL)
231 		pkt = qdf_mem_malloc(sizeof(*pkt));
232 	return &pkt->u.pkt; /* not actually a dereference */
233 }
234 
235 /*
236  * htt_htc_pkt_free() - Free HTC packet buffer
237  * @htt_soc:	HTT SOC handle
238  */
239 static void
240 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
241 {
242 	struct dp_htt_htc_pkt_union *u_pkt =
243 		(struct dp_htt_htc_pkt_union *)pkt;
244 
245 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
246 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
247 	soc->htt_htc_pkt_freelist = u_pkt;
248 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
249 }
250 
251 /*
252  * htt_htc_pkt_pool_free() - Free HTC packet pool
253  * @htt_soc:	HTT SOC handle
254  */
255 static void
256 htt_htc_pkt_pool_free(struct htt_soc *soc)
257 {
258 	struct dp_htt_htc_pkt_union *pkt, *next;
259 	pkt = soc->htt_htc_pkt_freelist;
260 	while (pkt) {
261 		next = pkt->u.next;
262 		qdf_mem_free(pkt);
263 		pkt = next;
264 	}
265 	soc->htt_htc_pkt_freelist = NULL;
266 }
267 
/*
 * htt_htc_misc_pkt_list_trim() - trim misc list
 * @soc: HTT SOC handle
 * @level: max no. of pkts in list
 *
 * Walks the misclist and, once more than @level packets have been seen,
 * frees every remaining (older) packet along with its mapped network
 * buffer.  The node at position @level has its 'next' cleared so the
 * retained portion of the list stays properly terminated.  Runs under
 * the HTT TX mutex.
 */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list*/
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			pkt = NULL;
			/* terminate the retained list at the previous node;
			 * prev is NULL on later iterations (pkt was reset
			 * above), so this executes only for the first
			 * trimmed packet
			 */
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
300 
301 /*
302  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
303  * @htt_soc:	HTT SOC handle
304  * @dp_htt_htc_pkt: pkt to be added to list
305  */
306 static void
307 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
308 {
309 	struct dp_htt_htc_pkt_union *u_pkt =
310 				(struct dp_htt_htc_pkt_union *)pkt;
311 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
312 							pkt->htc_pkt.Endpoint)
313 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
314 
315 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
316 	if (soc->htt_htc_pkt_misclist) {
317 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
318 		soc->htt_htc_pkt_misclist = u_pkt;
319 	} else {
320 		soc->htt_htc_pkt_misclist = u_pkt;
321 	}
322 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
323 
324 	/* only ce pipe size + tx_queue_depth could possibly be in use
325 	 * free older packets in the misclist
326 	 */
327 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
328 }
329 
330 /*
331  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
332  * @htt_soc:	HTT SOC handle
333  */
334 static void
335 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
336 {
337 	struct dp_htt_htc_pkt_union *pkt, *next;
338 	qdf_nbuf_t netbuf;
339 
340 	pkt = soc->htt_htc_pkt_misclist;
341 
342 	while (pkt) {
343 		next = pkt->u.next;
344 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
345 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
346 
347 		soc->stats.htc_pkt_free++;
348 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
349 			 "%s: Pkt free count %d",
350 			 __func__, soc->stats.htc_pkt_free);
351 
352 		qdf_nbuf_free(netbuf);
353 		qdf_mem_free(pkt);
354 		pkt = next;
355 	}
356 	soc->htt_htc_pkt_misclist = NULL;
357 }
358 
/*
 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
 * @tgt_mac_addr:	Target MAC
 * @buffer:		Output buffer
 *
 * Return: @buffer with the deswizzled address on big-endian hosts,
 *	   @tgt_mac_addr unchanged otherwise
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	int i;

	/*
	 * The host endianness is opposite of the target endianness.
	 * To make u_int32_t elements come out correctly, the target->host
	 * upload has swizzled the bytes in each u_int32_t element of the
	 * message.
	 * For byte-array message fields like the MAC address, this
	 * upload swizzling puts the bytes in the wrong order, and needs
	 * to be undone: word 0 carries MAC bytes 0-3, word 1 carries
	 * MAC bytes 4-5 (in its upper lanes).
	 */
	for (i = 0; i < 4; i++)
		buffer[i] = tgt_mac_addr[3 - i];
	buffer[4] = tgt_mac_addr[7];
	buffer[5] = tgt_mac_addr[6];
	return buffer;
#else
	/*
	 * The host endianness matches the target endianness -
	 * we can use the mac addr directly from the message buffer.
	 */
	return tgt_mac_addr;
#endif
}
392 
393 /*
394  * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
395  * @soc:	SOC handle
396  * @status:	Completion status
397  * @netbuf:	HTT buffer
398  */
399 static void
400 dp_htt_h2t_send_complete_free_netbuf(
401 	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
402 {
403 	qdf_nbuf_free(netbuf);
404 }
405 
/*
 * dp_htt_h2t_send_complete() - H2T send-completion handler installed with HTC
 * @context:	Opaque context (HTT SOC handle)
 * @htc_pkt:	completed HTC packet
 *
 * Recovers the dp_htt_htc_pkt wrapper from the embedded HTC_PACKET,
 * invokes the optional part-2 completion callback stashed in pPktContext
 * (which typically frees the message nbuf), then recycles the wrapper
 * onto the freelist.
 */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	void (*send_complete_part2)(
		void *soc, A_STATUS status, qdf_nbuf_t msdu);
	struct htt_soc *soc =  (struct htt_soc *) context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	send_complete_part2 = htc_pkt->pPktContext;

	/* recover the wrapper object that embeds this HTC_PACKET */
	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	 */
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	if (send_complete_part2 != NULL) {
		send_complete_part2(
			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
	}
	/* free the htt_htc_pkt / HTC_PACKET object */
	htt_htc_pkt_free(soc, htt_pkt);
}
437 
438 /*
439  * htt_h2t_ver_req_msg() - Send HTT version request message to target
440  * @htt_soc:	HTT SOC handle
441  *
442  * Return: 0 on success; error code on failure
443  */
444 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
445 {
446 	struct dp_htt_htc_pkt *pkt;
447 	qdf_nbuf_t msg;
448 	uint32_t *msg_word;
449 
450 	msg = qdf_nbuf_alloc(
451 		soc->osdev,
452 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
453 		/* reserve room for the HTC header */
454 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
455 	if (!msg)
456 		return QDF_STATUS_E_NOMEM;
457 
458 	/*
459 	 * Set the length of the message.
460 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
461 	 * separately during the below call to qdf_nbuf_push_head.
462 	 * The contribution from the HTC header is added separately inside HTC.
463 	 */
464 	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
465 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
466 			"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
467 			__func__);
468 		return QDF_STATUS_E_FAILURE;
469 	}
470 
471 	/* fill in the message contents */
472 	msg_word = (u_int32_t *) qdf_nbuf_data(msg);
473 
474 	/* rewind beyond alignment pad to get to the HTC header reserved area */
475 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
476 
477 	*msg_word = 0;
478 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
479 
480 	pkt = htt_htc_pkt_alloc(soc);
481 	if (!pkt) {
482 		qdf_nbuf_free(msg);
483 		return QDF_STATUS_E_FAILURE;
484 	}
485 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
486 
487 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
488 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
489 		qdf_nbuf_len(msg), soc->htc_endpoint,
490 		1); /* tag - not relevant here */
491 
492 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
493 	DP_HTT_SEND_HTC_PKT(soc, pkt);
494 	return 0;
495 }
496 
497 /*
498  * htt_srng_setup() - Send SRNG setup message to target
499  * @htt_soc:	HTT SOC handle
500  * @mac_id:	MAC Id
501  * @hal_srng:	Opaque HAL SRNG pointer
502  * @hal_ring_type:	SRNG ring type
503  *
504  * Return: 0 on success; error code on failure
505  */
506 int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,
507 	int hal_ring_type)
508 {
509 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
510 	struct dp_htt_htc_pkt *pkt;
511 	qdf_nbuf_t htt_msg;
512 	uint32_t *msg_word;
513 	struct hal_srng_params srng_params;
514 	qdf_dma_addr_t hp_addr, tp_addr;
515 	uint32_t ring_entry_size =
516 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
517 	int htt_ring_type, htt_ring_id;
518 
519 	/* Sizes should be set in 4-byte words */
520 	ring_entry_size = ring_entry_size >> 2;
521 
522 	htt_msg = qdf_nbuf_alloc(soc->osdev,
523 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
524 		/* reserve room for the HTC header */
525 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
526 	if (!htt_msg)
527 		goto fail0;
528 
529 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
530 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_srng);
531 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_srng);
532 
533 	switch (hal_ring_type) {
534 	case RXDMA_BUF:
535 #ifdef QCA_HOST2FW_RXBUF_RING
536 		if (srng_params.ring_id ==
537 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
538 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
539 			htt_ring_type = HTT_SW_TO_SW_RING;
540 #ifdef IPA_OFFLOAD
541 		} else if (srng_params.ring_id ==
542 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
543 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
544 			htt_ring_type = HTT_SW_TO_SW_RING;
545 #endif
546 #else
547 		if (srng_params.ring_id ==
548 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
549 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
550 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
551 			htt_ring_type = HTT_SW_TO_HW_RING;
552 #endif
553 		} else if (srng_params.ring_id ==
554 #ifdef IPA_OFFLOAD
555 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
556 #else
557 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
558 #endif
559 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
560 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
561 			htt_ring_type = HTT_SW_TO_HW_RING;
562 		} else {
563 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
564 				   "%s: Ring %d currently not supported",
565 				   __func__, srng_params.ring_id);
566 			goto fail1;
567 		}
568 
569 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
570 			  "%s: ring_type %d ring_id %d",
571 			  __func__, hal_ring_type, srng_params.ring_id);
572 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
573 			  "%s: hp_addr 0x%llx tp_addr 0x%llx",
574 			  __func__, (uint64_t)hp_addr, (uint64_t)tp_addr);
575 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
576 			  "%s: htt_ring_id %d", __func__, htt_ring_id);
577 		break;
578 	case RXDMA_MONITOR_BUF:
579 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
580 		htt_ring_type = HTT_SW_TO_HW_RING;
581 		break;
582 	case RXDMA_MONITOR_STATUS:
583 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
584 		htt_ring_type = HTT_SW_TO_HW_RING;
585 		break;
586 	case RXDMA_MONITOR_DST:
587 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
588 		htt_ring_type = HTT_HW_TO_SW_RING;
589 		break;
590 	case RXDMA_MONITOR_DESC:
591 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
592 		htt_ring_type = HTT_SW_TO_HW_RING;
593 		break;
594 	case RXDMA_DST:
595 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
596 		htt_ring_type = HTT_HW_TO_SW_RING;
597 		break;
598 
599 	default:
600 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
601 			"%s: Ring currently not supported", __func__);
602 			goto fail1;
603 	}
604 
605 	/*
606 	 * Set the length of the message.
607 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
608 	 * separately during the below call to qdf_nbuf_push_head.
609 	 * The contribution from the HTC header is added separately inside HTC.
610 	 */
611 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
612 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
613 			"%s: Failed to expand head for SRING_SETUP msg",
614 			__func__);
615 		return QDF_STATUS_E_FAILURE;
616 	}
617 
618 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
619 
620 	/* rewind beyond alignment pad to get to the HTC header reserved area */
621 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
622 
623 	/* word 0 */
624 	*msg_word = 0;
625 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
626 
627 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
628 			(htt_ring_type == HTT_HW_TO_SW_RING))
629 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word,
630 			 DP_SW2HW_MACID(mac_id));
631 	else
632 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
633 
634 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
635 		  "%s: mac_id %d", __func__, mac_id);
636 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
637 	/* TODO: Discuss with FW on changing this to unique ID and using
638 	 * htt_ring_type to send the type of ring
639 	 */
640 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
641 
642 	/* word 1 */
643 	msg_word++;
644 	*msg_word = 0;
645 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
646 		srng_params.ring_base_paddr & 0xffffffff);
647 
648 	/* word 2 */
649 	msg_word++;
650 	*msg_word = 0;
651 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
652 		(uint64_t)srng_params.ring_base_paddr >> 32);
653 
654 	/* word 3 */
655 	msg_word++;
656 	*msg_word = 0;
657 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
658 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
659 		(ring_entry_size * srng_params.num_entries));
660 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
661 		  "%s: entry_size %d", __func__,
662 			 ring_entry_size);
663 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
664 		  "%s: num_entries %d", __func__,
665 			 srng_params.num_entries);
666 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
667 		  "%s: ring_size %d", __func__,
668 			 (ring_entry_size * srng_params.num_entries));
669 	if (htt_ring_type == HTT_SW_TO_HW_RING)
670 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
671 						*msg_word, 1);
672 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
673 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
674 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
675 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
676 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
677 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
678 
679 	/* word 4 */
680 	msg_word++;
681 	*msg_word = 0;
682 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
683 		hp_addr & 0xffffffff);
684 
685 	/* word 5 */
686 	msg_word++;
687 	*msg_word = 0;
688 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
689 		(uint64_t)hp_addr >> 32);
690 
691 	/* word 6 */
692 	msg_word++;
693 	*msg_word = 0;
694 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
695 		tp_addr & 0xffffffff);
696 
697 	/* word 7 */
698 	msg_word++;
699 	*msg_word = 0;
700 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
701 		(uint64_t)tp_addr >> 32);
702 
703 	/* word 8 */
704 	msg_word++;
705 	*msg_word = 0;
706 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
707 		srng_params.msi_addr & 0xffffffff);
708 
709 	/* word 9 */
710 	msg_word++;
711 	*msg_word = 0;
712 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
713 		(uint64_t)(srng_params.msi_addr) >> 32);
714 
715 	/* word 10 */
716 	msg_word++;
717 	*msg_word = 0;
718 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
719 		srng_params.msi_data);
720 
721 	/* word 11 */
722 	msg_word++;
723 	*msg_word = 0;
724 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
725 		srng_params.intr_batch_cntr_thres_entries *
726 		ring_entry_size);
727 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
728 		srng_params.intr_timer_thres_us >> 3);
729 
730 	/* word 12 */
731 	msg_word++;
732 	*msg_word = 0;
733 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
734 		/* TODO: Setting low threshold to 1/8th of ring size - see
735 		 * if this needs to be configurable
736 		 */
737 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
738 			srng_params.low_threshold);
739 	}
740 	/* "response_required" field should be set if a HTT response message is
741 	 * required after setting up the ring.
742 	 */
743 	pkt = htt_htc_pkt_alloc(soc);
744 	if (!pkt)
745 		goto fail1;
746 
747 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
748 
749 	SET_HTC_PACKET_INFO_TX(
750 		&pkt->htc_pkt,
751 		dp_htt_h2t_send_complete_free_netbuf,
752 		qdf_nbuf_data(htt_msg),
753 		qdf_nbuf_len(htt_msg),
754 		soc->htc_endpoint,
755 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
756 
757 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
758 	DP_HTT_SEND_HTC_PKT(soc, pkt);
759 
760 	return QDF_STATUS_SUCCESS;
761 
762 fail1:
763 	qdf_nbuf_free(htt_msg);
764 fail0:
765 	return QDF_STATUS_E_FAILURE;
766 }
767 
768 /*
769  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
770  * config message to target
771  * @htt_soc:	HTT SOC handle
772  * @pdev_id:	PDEV Id
773  * @hal_srng:	Opaque HAL SRNG pointer
774  * @hal_ring_type:	SRNG ring type
775  * @ring_buf_size:	SRNG buffer size
776  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
777  * Return: 0 on success; error code on failure
778  */
779 int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng,
780 	int hal_ring_type, int ring_buf_size,
781 	struct htt_rx_ring_tlv_filter *htt_tlv_filter)
782 {
783 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
784 	struct dp_htt_htc_pkt *pkt;
785 	qdf_nbuf_t htt_msg;
786 	uint32_t *msg_word;
787 	struct hal_srng_params srng_params;
788 	uint32_t htt_ring_type, htt_ring_id;
789 	uint32_t tlv_filter;
790 
791 	htt_msg = qdf_nbuf_alloc(soc->osdev,
792 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
793 	/* reserve room for the HTC header */
794 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
795 	if (!htt_msg)
796 		goto fail0;
797 
798 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
799 
800 	switch (hal_ring_type) {
801 	case RXDMA_BUF:
802 #if QCA_HOST2FW_RXBUF_RING
803 		htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
804 		htt_ring_type = HTT_SW_TO_SW_RING;
805 #else
806 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
807 		htt_ring_type = HTT_SW_TO_HW_RING;
808 #endif
809 		break;
810 	case RXDMA_MONITOR_BUF:
811 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
812 		htt_ring_type = HTT_SW_TO_HW_RING;
813 		break;
814 	case RXDMA_MONITOR_STATUS:
815 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
816 		htt_ring_type = HTT_SW_TO_HW_RING;
817 		break;
818 	case RXDMA_MONITOR_DST:
819 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
820 		htt_ring_type = HTT_HW_TO_SW_RING;
821 		break;
822 	case RXDMA_MONITOR_DESC:
823 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
824 		htt_ring_type = HTT_SW_TO_HW_RING;
825 		break;
826 	case RXDMA_DST:
827 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
828 		htt_ring_type = HTT_HW_TO_SW_RING;
829 		break;
830 
831 	default:
832 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
833 			"%s: Ring currently not supported", __func__);
834 		goto fail1;
835 	}
836 
837 	/*
838 	 * Set the length of the message.
839 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
840 	 * separately during the below call to qdf_nbuf_push_head.
841 	 * The contribution from the HTC header is added separately inside HTC.
842 	 */
843 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
844 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
845 			"%s: Failed to expand head for RX Ring Cfg msg",
846 			__func__);
847 		goto fail1; /* failure */
848 	}
849 
850 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
851 
852 	/* rewind beyond alignment pad to get to the HTC header reserved area */
853 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
854 
855 	/* word 0 */
856 	*msg_word = 0;
857 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
858 
859 	/*
860 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
861 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
862 	 */
863 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
864 			htt_ring_type == HTT_SW_TO_HW_RING)
865 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
866 						DP_SW2HW_MACID(pdev_id));
867 
868 	/* TODO: Discuss with FW on changing this to unique ID and using
869 	 * htt_ring_type to send the type of ring
870 	 */
871 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
872 
873 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
874 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
875 
876 	HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
877 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
878 
879 	/* word 1 */
880 	msg_word++;
881 	*msg_word = 0;
882 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
883 		ring_buf_size);
884 
885 	/* word 2 */
886 	msg_word++;
887 	*msg_word = 0;
888 
889 	if (htt_tlv_filter->enable_fp) {
890 		/* TYPE: MGMT */
891 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
892 			FP, MGMT, 0000,
893 			(htt_tlv_filter->fp_mgmt_filter &
894 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
895 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
896 			FP, MGMT, 0001,
897 			(htt_tlv_filter->fp_mgmt_filter &
898 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
899 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
900 			FP, MGMT, 0010,
901 			(htt_tlv_filter->fp_mgmt_filter &
902 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
903 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
904 			FP, MGMT, 0011,
905 			(htt_tlv_filter->fp_mgmt_filter &
906 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
907 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
908 			FP, MGMT, 0100,
909 			(htt_tlv_filter->fp_mgmt_filter &
910 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
911 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
912 			FP, MGMT, 0101,
913 			(htt_tlv_filter->fp_mgmt_filter &
914 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
915 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
916 			FP, MGMT, 0110,
917 			(htt_tlv_filter->fp_mgmt_filter &
918 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
919 		/* reserved */
920 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
921 			MGMT, 0111,
922 			(htt_tlv_filter->fp_mgmt_filter &
923 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
924 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
925 			FP, MGMT, 1000,
926 			(htt_tlv_filter->fp_mgmt_filter &
927 			FILTER_MGMT_BEACON) ? 1 : 0);
928 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
929 			FP, MGMT, 1001,
930 			(htt_tlv_filter->fp_mgmt_filter &
931 			FILTER_MGMT_ATIM) ? 1 : 0);
932 	}
933 
934 	if (htt_tlv_filter->enable_md) {
935 			/* TYPE: MGMT */
936 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
937 			MD, MGMT, 0000,
938 			(htt_tlv_filter->md_mgmt_filter &
939 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
940 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
941 			MD, MGMT, 0001,
942 			(htt_tlv_filter->md_mgmt_filter &
943 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
944 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
945 			MD, MGMT, 0010,
946 			(htt_tlv_filter->md_mgmt_filter &
947 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
948 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
949 			MD, MGMT, 0011,
950 			(htt_tlv_filter->md_mgmt_filter &
951 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
952 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
953 			MD, MGMT, 0100,
954 			(htt_tlv_filter->md_mgmt_filter &
955 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
956 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
957 			MD, MGMT, 0101,
958 			(htt_tlv_filter->md_mgmt_filter &
959 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
960 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
961 			MD, MGMT, 0110,
962 			(htt_tlv_filter->md_mgmt_filter &
963 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
964 		/* reserved */
965 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
966 			MGMT, 0111,
967 			(htt_tlv_filter->md_mgmt_filter &
968 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
969 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
970 			MD, MGMT, 1000,
971 			(htt_tlv_filter->md_mgmt_filter &
972 			FILTER_MGMT_BEACON) ? 1 : 0);
973 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
974 			MD, MGMT, 1001,
975 			(htt_tlv_filter->md_mgmt_filter &
976 			FILTER_MGMT_ATIM) ? 1 : 0);
977 	}
978 
979 	if (htt_tlv_filter->enable_mo) {
980 		/* TYPE: MGMT */
981 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
982 			MO, MGMT, 0000,
983 			(htt_tlv_filter->mo_mgmt_filter &
984 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
985 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
986 			MO, MGMT, 0001,
987 			(htt_tlv_filter->mo_mgmt_filter &
988 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
989 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
990 			MO, MGMT, 0010,
991 			(htt_tlv_filter->mo_mgmt_filter &
992 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
993 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
994 			MO, MGMT, 0011,
995 			(htt_tlv_filter->mo_mgmt_filter &
996 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
997 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
998 			MO, MGMT, 0100,
999 			(htt_tlv_filter->mo_mgmt_filter &
1000 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1001 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1002 			MO, MGMT, 0101,
1003 			(htt_tlv_filter->mo_mgmt_filter &
1004 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1005 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1006 			MO, MGMT, 0110,
1007 			(htt_tlv_filter->mo_mgmt_filter &
1008 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1009 		/* reserved */
1010 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
1011 			MGMT, 0111,
1012 			(htt_tlv_filter->mo_mgmt_filter &
1013 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1014 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1015 			MO, MGMT, 1000,
1016 			(htt_tlv_filter->mo_mgmt_filter &
1017 			FILTER_MGMT_BEACON) ? 1 : 0);
1018 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1019 			MO, MGMT, 1001,
1020 			(htt_tlv_filter->mo_mgmt_filter &
1021 			FILTER_MGMT_ATIM) ? 1 : 0);
1022 	}
1023 
1024 	/* word 3 */
1025 	msg_word++;
1026 	*msg_word = 0;
1027 
1028 	if (htt_tlv_filter->enable_fp) {
1029 		/* TYPE: MGMT */
1030 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1031 			FP, MGMT, 1010,
1032 			(htt_tlv_filter->fp_mgmt_filter &
1033 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1034 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1035 			FP, MGMT, 1011,
1036 			(htt_tlv_filter->fp_mgmt_filter &
1037 			FILTER_MGMT_AUTH) ? 1 : 0);
1038 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1039 			FP, MGMT, 1100,
1040 			(htt_tlv_filter->fp_mgmt_filter &
1041 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1042 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1043 			FP, MGMT, 1101,
1044 			(htt_tlv_filter->fp_mgmt_filter &
1045 			FILTER_MGMT_ACTION) ? 1 : 0);
1046 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1047 			FP, MGMT, 1110,
1048 			(htt_tlv_filter->fp_mgmt_filter &
1049 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1050 		/* reserved*/
1051 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1052 			MGMT, 1111,
1053 			(htt_tlv_filter->fp_mgmt_filter &
1054 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1055 	}
1056 
1057 	if (htt_tlv_filter->enable_md) {
1058 			/* TYPE: MGMT */
1059 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1060 			MD, MGMT, 1010,
1061 			(htt_tlv_filter->md_mgmt_filter &
1062 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1063 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1064 			MD, MGMT, 1011,
1065 			(htt_tlv_filter->md_mgmt_filter &
1066 			FILTER_MGMT_AUTH) ? 1 : 0);
1067 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1068 			MD, MGMT, 1100,
1069 			(htt_tlv_filter->md_mgmt_filter &
1070 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1071 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1072 			MD, MGMT, 1101,
1073 			(htt_tlv_filter->md_mgmt_filter &
1074 			FILTER_MGMT_ACTION) ? 1 : 0);
1075 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1076 			MD, MGMT, 1110,
1077 			(htt_tlv_filter->md_mgmt_filter &
1078 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1079 	}
1080 
1081 	if (htt_tlv_filter->enable_mo) {
1082 		/* TYPE: MGMT */
1083 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1084 			MO, MGMT, 1010,
1085 			(htt_tlv_filter->mo_mgmt_filter &
1086 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1087 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1088 			MO, MGMT, 1011,
1089 			(htt_tlv_filter->mo_mgmt_filter &
1090 			FILTER_MGMT_AUTH) ? 1 : 0);
1091 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1092 			MO, MGMT, 1100,
1093 			(htt_tlv_filter->mo_mgmt_filter &
1094 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1095 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1096 			MO, MGMT, 1101,
1097 			(htt_tlv_filter->mo_mgmt_filter &
1098 			FILTER_MGMT_ACTION) ? 1 : 0);
1099 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1100 			MO, MGMT, 1110,
1101 			(htt_tlv_filter->mo_mgmt_filter &
1102 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1103 		/* reserved*/
1104 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1105 			MGMT, 1111,
1106 			(htt_tlv_filter->mo_mgmt_filter &
1107 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1108 	}
1109 
1110 	/* word 4 */
1111 	msg_word++;
1112 	*msg_word = 0;
1113 
1114 	if (htt_tlv_filter->enable_fp) {
1115 		/* TYPE: CTRL */
1116 		/* reserved */
1117 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1118 			CTRL, 0000,
1119 			(htt_tlv_filter->fp_ctrl_filter &
1120 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1121 		/* reserved */
1122 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1123 			CTRL, 0001,
1124 			(htt_tlv_filter->fp_ctrl_filter &
1125 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1126 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1127 			CTRL, 0010,
1128 			(htt_tlv_filter->fp_ctrl_filter &
1129 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1130 		/* reserved */
1131 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1132 			CTRL, 0011,
1133 			(htt_tlv_filter->fp_ctrl_filter &
1134 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1135 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1136 			CTRL, 0100,
1137 			(htt_tlv_filter->fp_ctrl_filter &
1138 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1139 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1140 			CTRL, 0101,
1141 			(htt_tlv_filter->fp_ctrl_filter &
1142 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1143 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1144 			CTRL, 0110,
1145 			(htt_tlv_filter->fp_ctrl_filter &
1146 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1147 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1148 			CTRL, 0111,
1149 			(htt_tlv_filter->fp_ctrl_filter &
1150 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1151 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1152 			CTRL, 1000,
1153 			(htt_tlv_filter->fp_ctrl_filter &
1154 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1155 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1156 			CTRL, 1001,
1157 			(htt_tlv_filter->fp_ctrl_filter &
1158 			FILTER_CTRL_BA) ? 1 : 0);
1159 	}
1160 
1161 	if (htt_tlv_filter->enable_md) {
1162 		/* TYPE: CTRL */
1163 		/* reserved */
1164 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1165 			CTRL, 0000,
1166 			(htt_tlv_filter->md_ctrl_filter &
1167 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1168 		/* reserved */
1169 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1170 			CTRL, 0001,
1171 			(htt_tlv_filter->md_ctrl_filter &
1172 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1173 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1174 			CTRL, 0010,
1175 			(htt_tlv_filter->md_ctrl_filter &
1176 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1177 		/* reserved */
1178 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1179 			CTRL, 0011,
1180 			(htt_tlv_filter->md_ctrl_filter &
1181 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1182 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1183 			CTRL, 0100,
1184 			(htt_tlv_filter->md_ctrl_filter &
1185 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1186 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1187 			CTRL, 0101,
1188 			(htt_tlv_filter->md_ctrl_filter &
1189 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1190 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1191 			CTRL, 0110,
1192 			(htt_tlv_filter->md_ctrl_filter &
1193 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1194 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1195 			CTRL, 0111,
1196 			(htt_tlv_filter->md_ctrl_filter &
1197 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1198 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1199 			CTRL, 1000,
1200 			(htt_tlv_filter->md_ctrl_filter &
1201 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1202 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1203 			CTRL, 1001,
1204 			(htt_tlv_filter->md_ctrl_filter &
1205 			FILTER_CTRL_BA) ? 1 : 0);
1206 	}
1207 
1208 	if (htt_tlv_filter->enable_mo) {
1209 		/* TYPE: CTRL */
1210 		/* reserved */
1211 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1212 			CTRL, 0000,
1213 			(htt_tlv_filter->mo_ctrl_filter &
1214 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1215 		/* reserved */
1216 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1217 			CTRL, 0001,
1218 			(htt_tlv_filter->mo_ctrl_filter &
1219 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1220 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1221 			CTRL, 0010,
1222 			(htt_tlv_filter->mo_ctrl_filter &
1223 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1224 		/* reserved */
1225 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1226 			CTRL, 0011,
1227 			(htt_tlv_filter->mo_ctrl_filter &
1228 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1229 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1230 			CTRL, 0100,
1231 			(htt_tlv_filter->mo_ctrl_filter &
1232 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1233 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1234 			CTRL, 0101,
1235 			(htt_tlv_filter->mo_ctrl_filter &
1236 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1237 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1238 			CTRL, 0110,
1239 			(htt_tlv_filter->mo_ctrl_filter &
1240 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1241 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1242 			CTRL, 0111,
1243 			(htt_tlv_filter->mo_ctrl_filter &
1244 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1245 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1246 			CTRL, 1000,
1247 			(htt_tlv_filter->mo_ctrl_filter &
1248 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1249 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1250 			CTRL, 1001,
1251 			(htt_tlv_filter->mo_ctrl_filter &
1252 			FILTER_CTRL_BA) ? 1 : 0);
1253 	}
1254 
1255 	/* word 5 */
1256 	msg_word++;
1257 	*msg_word = 0;
1258 	if (htt_tlv_filter->enable_fp) {
1259 		/* TYPE: CTRL */
1260 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1261 			CTRL, 1010,
1262 			(htt_tlv_filter->fp_ctrl_filter &
1263 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1264 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1265 			CTRL, 1011,
1266 			(htt_tlv_filter->fp_ctrl_filter &
1267 			FILTER_CTRL_RTS) ? 1 : 0);
1268 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1269 			CTRL, 1100,
1270 			(htt_tlv_filter->fp_ctrl_filter &
1271 			FILTER_CTRL_CTS) ? 1 : 0);
1272 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1273 			CTRL, 1101,
1274 			(htt_tlv_filter->fp_ctrl_filter &
1275 			FILTER_CTRL_ACK) ? 1 : 0);
1276 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1277 			CTRL, 1110,
1278 			(htt_tlv_filter->fp_ctrl_filter &
1279 			FILTER_CTRL_CFEND) ? 1 : 0);
1280 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1281 			CTRL, 1111,
1282 			(htt_tlv_filter->fp_ctrl_filter &
1283 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1284 		/* TYPE: DATA */
1285 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1286 			DATA, MCAST,
1287 			(htt_tlv_filter->fp_data_filter &
1288 			FILTER_DATA_MCAST) ? 1 : 0);
1289 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1290 			DATA, UCAST,
1291 			(htt_tlv_filter->fp_data_filter &
1292 			FILTER_DATA_UCAST) ? 1 : 0);
1293 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1294 			DATA, NULL,
1295 			(htt_tlv_filter->fp_data_filter &
1296 			FILTER_DATA_NULL) ? 1 : 0);
1297 	}
1298 
1299 	if (htt_tlv_filter->enable_md) {
1300 		/* TYPE: CTRL */
1301 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1302 			CTRL, 1010,
1303 			(htt_tlv_filter->md_ctrl_filter &
1304 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1305 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1306 			CTRL, 1011,
1307 			(htt_tlv_filter->md_ctrl_filter &
1308 			FILTER_CTRL_RTS) ? 1 : 0);
1309 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1310 			CTRL, 1100,
1311 			(htt_tlv_filter->md_ctrl_filter &
1312 			FILTER_CTRL_CTS) ? 1 : 0);
1313 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1314 			CTRL, 1101,
1315 			(htt_tlv_filter->md_ctrl_filter &
1316 			FILTER_CTRL_ACK) ? 1 : 0);
1317 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1318 			CTRL, 1110,
1319 			(htt_tlv_filter->md_ctrl_filter &
1320 			FILTER_CTRL_CFEND) ? 1 : 0);
1321 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1322 			CTRL, 1111,
1323 			(htt_tlv_filter->md_ctrl_filter &
1324 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1325 		/* TYPE: DATA */
1326 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1327 			DATA, MCAST,
1328 			(htt_tlv_filter->md_data_filter &
1329 			FILTER_DATA_MCAST) ? 1 : 0);
1330 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1331 			DATA, UCAST,
1332 			(htt_tlv_filter->md_data_filter &
1333 			FILTER_DATA_UCAST) ? 1 : 0);
1334 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1335 			DATA, NULL,
1336 			(htt_tlv_filter->md_data_filter &
1337 			FILTER_DATA_NULL) ? 1 : 0);
1338 	}
1339 
1340 	if (htt_tlv_filter->enable_mo) {
1341 		/* TYPE: CTRL */
1342 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1343 			CTRL, 1010,
1344 			(htt_tlv_filter->mo_ctrl_filter &
1345 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1346 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1347 			CTRL, 1011,
1348 			(htt_tlv_filter->mo_ctrl_filter &
1349 			FILTER_CTRL_RTS) ? 1 : 0);
1350 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1351 			CTRL, 1100,
1352 			(htt_tlv_filter->mo_ctrl_filter &
1353 			FILTER_CTRL_CTS) ? 1 : 0);
1354 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1355 			CTRL, 1101,
1356 			(htt_tlv_filter->mo_ctrl_filter &
1357 			FILTER_CTRL_ACK) ? 1 : 0);
1358 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1359 			CTRL, 1110,
1360 			(htt_tlv_filter->mo_ctrl_filter &
1361 			FILTER_CTRL_CFEND) ? 1 : 0);
1362 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1363 			CTRL, 1111,
1364 			(htt_tlv_filter->mo_ctrl_filter &
1365 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1366 		/* TYPE: DATA */
1367 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1368 			DATA, MCAST,
1369 			(htt_tlv_filter->mo_data_filter &
1370 			FILTER_DATA_MCAST) ? 1 : 0);
1371 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1372 			DATA, UCAST,
1373 			(htt_tlv_filter->mo_data_filter &
1374 			FILTER_DATA_UCAST) ? 1 : 0);
1375 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1376 			DATA, NULL,
1377 			(htt_tlv_filter->mo_data_filter &
1378 			FILTER_DATA_NULL) ? 1 : 0);
1379 	}
1380 
1381 	/* word 6 */
1382 	msg_word++;
1383 	*msg_word = 0;
1384 	tlv_filter = 0;
1385 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1386 		htt_tlv_filter->mpdu_start);
1387 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1388 		htt_tlv_filter->msdu_start);
1389 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1390 		htt_tlv_filter->packet);
1391 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1392 		htt_tlv_filter->msdu_end);
1393 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1394 		htt_tlv_filter->mpdu_end);
1395 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1396 		htt_tlv_filter->packet_header);
1397 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1398 		htt_tlv_filter->attention);
1399 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1400 		htt_tlv_filter->ppdu_start);
1401 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1402 		htt_tlv_filter->ppdu_end);
1403 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1404 		htt_tlv_filter->ppdu_end_user_stats);
1405 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1406 		PPDU_END_USER_STATS_EXT,
1407 		htt_tlv_filter->ppdu_end_user_stats_ext);
1408 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1409 		htt_tlv_filter->ppdu_end_status_done);
1410 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1411 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1412 		 htt_tlv_filter->header_per_msdu);
1413 
1414 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1415 
1416 	/* "response_required" field should be set if a HTT response message is
1417 	 * required after setting up the ring.
1418 	 */
1419 	pkt = htt_htc_pkt_alloc(soc);
1420 	if (!pkt)
1421 		goto fail1;
1422 
1423 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1424 
1425 	SET_HTC_PACKET_INFO_TX(
1426 		&pkt->htc_pkt,
1427 		dp_htt_h2t_send_complete_free_netbuf,
1428 		qdf_nbuf_data(htt_msg),
1429 		qdf_nbuf_len(htt_msg),
1430 		soc->htc_endpoint,
1431 		1); /* tag - not relevant here */
1432 
1433 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1434 	DP_HTT_SEND_HTC_PKT(soc, pkt);
1435 	return QDF_STATUS_SUCCESS;
1436 
1437 fail1:
1438 	qdf_nbuf_free(htt_msg);
1439 fail0:
1440 	return QDF_STATUS_E_FAILURE;
1441 }
1442 
#if defined(CONFIG_WIN) && WDI_EVENT_ENABLE
/**
 * dp_send_htt_stat_resp() - forward one HTT EXT stats segment to WDI
 * @htt_stats: context tracking the remaining stats message length
 * @soc: DP SoC handle used to reach WDI subscribers
 * @htt_msg: network buffer holding this T2H stats segment
 *
 * Extracts the pdev id from the cookie MSB word, hands the segment to the
 * WDI_EVENT_HTT_STATS handler and frees @htt_msg (the WDI handler copies
 * the data it needs before returning).
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
					struct dp_soc *soc, qdf_nbuf_t htt_msg)

{
	uint32_t pdev_id;
	uint32_t *msg_word = NULL;
	uint32_t msg_remain_len = 0;

	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);

	/*COOKIE MSB*/
	/* pdev id occupies the low bits of word 2 (HTT_PID_BIT_MASK = 0x3) */
	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;

	/* stats message length + 16 size of HTT header*/
	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
				(uint32_t)DP_EXT_MSG_LENGTH);

	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
			msg_word,  msg_remain_len,
			WDI_NO_VAL, pdev_id);

	/* account for the segment just consumed; remainder (if any) arrives
	 * in follow-up T2H buffers
	 */
	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
	}
	/* Need to be freed here as WDI handler will
	 * make a copy of pkt to send data to application
	 */
	qdf_nbuf_free(htt_msg);
	return QDF_STATUS_SUCCESS;
}
#else
/* WDI events disabled: report no-support so the caller processes the
 * stats locally (print/copy) instead of forwarding them.
 */
static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
					struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
	return QDF_STATUS_E_NOSUPPORT;
}
#endif
1481 
1482 /**
1483  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1484  * @htt_stats: htt stats info
1485  *
1486  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1487  * contains sub messages which are identified by a TLV header.
1488  * In this function we will process the stream of T2H messages and read all the
1489  * TLV contained in the message.
1490  *
 * The following cases have been taken care of
1492  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1493  *		In this case the buffer will contain multiple tlvs.
1494  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1495  *		Only one tlv will be contained in the HTT message and this tag
1496  *		will extend onto the next buffer.
1497  * Case 3: When the buffer is the continuation of the previous message
1498  * Case 4: tlv length is 0. which will indicate the end of message
1499  *
1500  * return: void
1501  */
static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
					struct dp_soc *soc)
{
	htt_tlv_tag_t tlv_type = 0xff;
	qdf_nbuf_t htt_msg = NULL;
	uint32_t *msg_word;
	uint8_t *tlv_buf_head = NULL;	/* reassembly buffer for a TLV split across buffers */
	uint8_t *tlv_buf_tail = NULL;	/* write cursor inside tlv_buf_head */
	uint32_t msg_remain_len = 0;
	uint32_t tlv_remain_len = 0;
	uint32_t *tlv_start;
	int cookie_val;
	int cookie_msb;
	int pdev_id;
	bool copy_stats = false;
	struct dp_pdev *pdev;

	/* Process node in the HTT message queue */
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
		!= NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		/* non-zero word 1 routes this segment to dp_send_htt_stat_resp
		 * (WDI forwarding) instead of local processing
		 */
		cookie_val = *(msg_word + 1);
		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
					*(msg_word +
					HTT_T2H_EXT_STATS_TLV_START_OFFSET));

		if (cookie_val) {
			/* on success the buffer was consumed and freed by
			 * dp_send_htt_stat_resp; fall through only on failure
			 */
			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
					== QDF_STATUS_SUCCESS) {
				continue;
			}
		}

		cookie_msb = *(msg_word + 2);
		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
		pdev = soc->pdev_list[pdev_id];

		/* cookie MSB bits above the pdev id select copy-to-pdev
		 * (dp_htt_stats_copy_tag) over console print
		 */
		if (cookie_msb >> 2) {
			copy_stats = true;
		}

		/* read 5th word */
		msg_word = msg_word + 4;
		msg_remain_len = qdf_min(htt_stats->msg_len,
				(uint32_t) DP_EXT_MSG_LENGTH);
		/* Keep processing the node till node length is 0 */
		while (msg_remain_len) {
			/*
			 * if message is not a continuation of previous message
			 * read the tlv type and tlv length
			 */
			if (!tlv_buf_head) {
				tlv_type = HTT_STATS_TLV_TAG_GET(
						*msg_word);
				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
						*msg_word);
			}

			/* Case 4: zero-length TLV marks the end of the stream */
			if (tlv_remain_len == 0) {
				msg_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

				goto error;
			}

			/* TLV length field excludes the TLV header itself */
			if (!tlv_buf_head)
				tlv_remain_len += HTT_TLV_HDR_LEN;

			if ((tlv_remain_len <= msg_remain_len)) {
				/* Case 3 */
				/* remainder of a split TLV completes here:
				 * finish the copy and parse the reassembly buf
				 */
				if (tlv_buf_head) {
					qdf_mem_copy(tlv_buf_tail,
							(uint8_t *)msg_word,
							tlv_remain_len);
					tlv_start = (uint32_t *)tlv_buf_head;
				} else {
					/* Case 1 */
					tlv_start = msg_word;
				}

				if (copy_stats)
					dp_htt_stats_copy_tag(pdev, tlv_type, tlv_start);
				else
					dp_htt_stats_print_tag(tlv_type, tlv_start);

				msg_remain_len -= tlv_remain_len;

				msg_word = (uint32_t *)
					(((uint8_t *)msg_word) +
					tlv_remain_len);

				tlv_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

			} else { /* tlv_remain_len > msg_remain_len */
				/* Case 2 & 3 */
				/* TLV spans into the next buffer: stash the
				 * partial bytes; tlv_buf_head/tail persist
				 * across loop iterations until completion
				 */
				if (!tlv_buf_head) {
					tlv_buf_head = qdf_mem_malloc(
							tlv_remain_len);

					if (!tlv_buf_head) {
						QDF_TRACE(QDF_MODULE_ID_TXRX,
								QDF_TRACE_LEVEL_ERROR,
								"Alloc failed");
						goto error;
					}

					tlv_buf_tail = tlv_buf_head;
				}

				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
						msg_remain_len);
				tlv_remain_len -= msg_remain_len;
				tlv_buf_tail += msg_remain_len;
			}
		}

		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
		}

		qdf_nbuf_free(htt_msg);
	}
	return;

error:
	/* drop the current buffer and everything still queued */
	qdf_nbuf_free(htt_msg);
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
			!= NULL)
		qdf_nbuf_free(htt_msg);
}
1643 
1644 void htt_t2h_stats_handler(void *context)
1645 {
1646 	struct dp_soc *soc = (struct dp_soc *)context;
1647 	struct htt_stats_context htt_stats;
1648 	uint32_t *msg_word;
1649 	qdf_nbuf_t htt_msg = NULL;
1650 	uint8_t done;
1651 	uint8_t rem_stats;
1652 
1653 	if (!soc || !qdf_atomic_read(&soc->cmn_init_done)) {
1654 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1655 			"soc: 0x%pK, init_done: %d", soc,
1656 			qdf_atomic_read(&soc->cmn_init_done));
1657 		return;
1658 	}
1659 
1660 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
1661 	qdf_nbuf_queue_init(&htt_stats.msg);
1662 
1663 	/* pull one completed stats from soc->htt_stats_msg and process */
1664 	qdf_spin_lock_bh(&soc->htt_stats.lock);
1665 	if (!soc->htt_stats.num_stats) {
1666 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
1667 		return;
1668 	}
1669 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
1670 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1671 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
1672 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
1673 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
1674 		/*
1675 		 * Done bit signifies that this is the last T2H buffer in the
1676 		 * stream of HTT EXT STATS message
1677 		 */
1678 		if (done)
1679 			break;
1680 	}
1681 	rem_stats = --soc->htt_stats.num_stats;
1682 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
1683 
1684 	dp_process_htt_stat_msg(&htt_stats, soc);
1685 	/* If there are more stats to process, schedule stats work again */
1686 	if (rem_stats)
1687 		qdf_sched_work(0, &soc->htt_stats.work);
1688 }
1689 
1690 /*
1691  * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU,
1692  * if a new peer id arrives in a PPDU
1693  * pdev: DP pdev handle
1694  * @peer_id : peer unique identifier
1695  * @ppdu_info: per ppdu tlv structure
1696  *
1697  * return:user index to be populated
1698  */
1699 #ifdef FEATURE_PERPKT_INFO
1700 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
1701 						uint16_t peer_id,
1702 						struct ppdu_info *ppdu_info)
1703 {
1704 	uint8_t user_index = 0;
1705 	struct cdp_tx_completion_ppdu *ppdu_desc;
1706 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1707 
1708 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1709 
1710 	while ((user_index + 1) <= ppdu_info->last_user) {
1711 		ppdu_user_desc = &ppdu_desc->user[user_index];
1712 		if (ppdu_user_desc->peer_id != peer_id) {
1713 			user_index++;
1714 			continue;
1715 		} else {
1716 			/* Max users possible is 8 so user array index should
1717 			 * not exceed 7
1718 			 */
1719 			qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX);
1720 			return user_index;
1721 		}
1722 	}
1723 
1724 	ppdu_info->last_user++;
1725 	/* Max users possible is 8 so last user should not exceed 8 */
1726 	qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS);
1727 	return ppdu_info->last_user - 1;
1728 }
1729 
1730 /*
1731  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
1732  * pdev: DP pdev handle
1733  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
1734  * @ppdu_info: per ppdu tlv structure
1735  *
1736  * return:void
1737  */
static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
{
	uint16_t frame_type;
	uint16_t freq;
	struct dp_soc *soc = NULL;
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/* word offsets below follow htt_ppdu_stats_common_tlv layout --
	 * TODO(review): confirm each step against htt_ppdu_stats.h
	 */
	tag_buf += 2;
	ppdu_desc->num_users =
		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
	tag_buf++;
	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);

	/* map the FW frame type onto the coarser CDP classification */
	if ((frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) ||
			(frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU))
		ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
	else if ((frame_type == HTT_STATS_FTYPE_SGEN_MU_BAR) ||
		 (frame_type == HTT_STATS_FTYPE_SGEN_BAR))
		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
	else
		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;

	tag_buf += 2;
	ppdu_desc->tx_duration = *tag_buf;
	tag_buf += 3;
	ppdu_desc->ppdu_start_timestamp = *tag_buf;

	/* end timestamp is derived, not reported: start + duration */
	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
					ppdu_desc->tx_duration;
	/* Ack time stamp is same as end time stamp*/
	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;

	tag_buf++;

	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
	if (freq != ppdu_desc->channel) {
		/* channel changed: refresh cached operating channel via the
		 * control-path freq-to-channel callback when registered
		 */
		soc = pdev->soc;
		ppdu_desc->channel = freq;
		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
			pdev->operating_channel =
		soc->cdp_soc.ol_ops->freq_to_channel(pdev->ctrl_pdev, freq);
	}

	/* phy mode shares the same TLV word as the channel frequency */
	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
}
1786 
1787 /*
1788  * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
1789  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
1790  * @ppdu_info: per ppdu tlv structure
1791  *
1792  * return:void
1793  */
static void dp_process_ppdu_stats_user_common_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	/* NOTE(review): USER_RATE_TLV accessor used on a user_common TLV;
	 * presumably sw_peer_id sits at the same bit position in both --
	 * confirm against htt_ppdu_stats.h
	 */
	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);

	/* find (or allocate) this peer's slot in ppdu_desc->user[] */
	curr_user_index =
		dp_get_ppdu_info_user_index(pdev,
					    peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];

	if (peer_id == DP_SCAN_PEER_ID) {
		/* scan frames carry no real peer; record the vap/vdev id */
		ppdu_desc->vdev_id =
			HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
	} else {
		/* drop stats for peer ids that are no longer valid */
		if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
			return;
	}

	ppdu_user_desc->peer_id = peer_id;

	tag_buf++;

	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
		ppdu_user_desc->delayed_ba = 1;
	}

	/* mcast frames: MPDUs tried doubles as the MPDU count */
	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
		ppdu_user_desc->is_mcast = true;
		ppdu_user_desc->mpdu_tried_mcast =
		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
	} else {
		ppdu_user_desc->mpdu_tried_ucast =
		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
	}

	tag_buf++;

	ppdu_user_desc->qos_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
	ppdu_user_desc->frame_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;

	/* delayed BA: zero the MPDU counts here -- presumably they are
	 * filled in later when the BA status arrives; verify with callers
	 */
	if (ppdu_user_desc->delayed_ba) {
		ppdu_user_desc->mpdu_success = 0;
		ppdu_user_desc->mpdu_tried_mcast = 0;
		ppdu_user_desc->mpdu_tried_ucast = 0;
	}
}
1854 
1855 
1856 /**
1857  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
1858  * @pdev: DP pdev handle
1859  * @tag_buf: T2H message buffer carrying the user rate TLV
1860  * @ppdu_info: per ppdu tlv structure
1861  *
1862  * return:void
1863  */
1864 static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
1865 		uint32_t *tag_buf,
1866 		struct ppdu_info *ppdu_info)
1867 {
1868 	uint16_t peer_id;
1869 	struct dp_peer *peer;
1870 	struct cdp_tx_completion_ppdu *ppdu_desc;
1871 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1872 	uint8_t curr_user_index = 0;
1873 	struct dp_vdev *vdev;
1874 
1875 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1876 
1877 	tag_buf++;
1878 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
1879 
1880 	curr_user_index =
1881 		dp_get_ppdu_info_user_index(pdev,
1882 					    peer_id, ppdu_info);
1883 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1884 	if (peer_id == DP_SCAN_PEER_ID) {
1885 		vdev =
1886 		       dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
1887 							  ppdu_desc->vdev_id);
1888 		if (!vdev)
1889 			return;
1890 		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
1891 			     DP_MAC_ADDR_LEN);
1892 	} else {
1893 		peer = dp_peer_find_by_id(pdev->soc, peer_id);
1894 		if (!peer)
1895 			return;
1896 		qdf_mem_copy(ppdu_user_desc->mac_addr,
1897 			     peer->mac_addr.raw, DP_MAC_ADDR_LEN);
1898 		dp_peer_unref_del_find_by_id(peer);
1899 	}
1900 
1901 	ppdu_user_desc->peer_id = peer_id;
1902 
1903 	ppdu_user_desc->tid =
1904 		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
1905 
1906 	tag_buf += 2;
1907 
1908 	ppdu_user_desc->ru_tones =
1909 		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
1910 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
1911 
1912 	tag_buf += 2;
1913 
1914 	ppdu_user_desc->ppdu_type =
1915 		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
1916 
1917 	tag_buf++;
1918 	ppdu_user_desc->tx_rate = *tag_buf;
1919 
1920 	ppdu_user_desc->ltf_size =
1921 		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
1922 	ppdu_user_desc->stbc =
1923 		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
1924 	ppdu_user_desc->he_re =
1925 		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
1926 	ppdu_user_desc->txbf =
1927 		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
1928 	ppdu_user_desc->bw =
1929 		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
1930 	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
1931 	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
1932 	ppdu_user_desc->preamble =
1933 		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
1934 	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
1935 	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
1936 	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
1937 }
1938 
1939 /*
1940  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
1941  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
1942  * pdev: DP PDEV handle
1943  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
1944  * @ppdu_info: per ppdu tlv structure
1945  *
1946  * return:void
1947  */
1948 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
1949 		struct dp_pdev *pdev, uint32_t *tag_buf,
1950 		struct ppdu_info *ppdu_info)
1951 {
1952 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
1953 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
1954 
1955 	struct cdp_tx_completion_ppdu *ppdu_desc;
1956 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1957 	uint8_t curr_user_index = 0;
1958 	uint16_t peer_id;
1959 
1960 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1961 
1962 	tag_buf++;
1963 
1964 	peer_id =
1965 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
1966 
1967 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
1968 		return;
1969 
1970 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1971 
1972 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1973 	ppdu_user_desc->peer_id = peer_id;
1974 
1975 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
1976 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
1977 					CDP_BA_64_BIT_MAP_SIZE_DWORDS);
1978 }
1979 
1980 /*
1981  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
1982  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
1983  * soc: DP SOC handle
1984  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
1985  * @ppdu_info: per ppdu tlv structure
1986  *
1987  * return:void
1988  */
1989 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
1990 		struct dp_pdev *pdev, uint32_t *tag_buf,
1991 		struct ppdu_info *ppdu_info)
1992 {
1993 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
1994 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
1995 
1996 	struct cdp_tx_completion_ppdu *ppdu_desc;
1997 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1998 	uint8_t curr_user_index = 0;
1999 	uint16_t peer_id;
2000 
2001 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2002 
2003 	tag_buf++;
2004 
2005 	peer_id =
2006 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2007 
2008 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2009 		return;
2010 
2011 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2012 
2013 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2014 	ppdu_user_desc->peer_id = peer_id;
2015 
2016 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2017 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2018 					CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2019 }
2020 
2021 /*
2022  * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
2023  * htt_ppdu_stats_user_cmpltn_common_tlv
2024  * soc: DP SOC handle
2025  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
2026  * @ppdu_info: per ppdu tlv structure
2027  *
2028  * return:void
2029  */
2030 static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
2031 		struct dp_pdev *pdev, uint32_t *tag_buf,
2032 		struct ppdu_info *ppdu_info)
2033 {
2034 	uint16_t peer_id;
2035 	struct cdp_tx_completion_ppdu *ppdu_desc;
2036 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2037 	uint8_t curr_user_index = 0;
2038 	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
2039 		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
2040 
2041 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2042 
2043 	tag_buf++;
2044 	peer_id =
2045 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
2046 
2047 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2048 		return;
2049 
2050 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2051 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2052 	ppdu_user_desc->peer_id = peer_id;
2053 
2054 	ppdu_user_desc->completion_status =
2055 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
2056 				*tag_buf);
2057 
2058 	ppdu_user_desc->tid =
2059 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
2060 
2061 
2062 	tag_buf++;
2063 	if (qdf_likely(ppdu_user_desc->completion_status)) {
2064 		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
2065 		ppdu_user_desc->ack_rssi_valid = 1;
2066 	} else {
2067 		ppdu_user_desc->ack_rssi_valid = 0;
2068 	}
2069 
2070 	tag_buf++;
2071 
2072 	ppdu_user_desc->mpdu_success =
2073 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
2074 
2075 	tag_buf++;
2076 
2077 	ppdu_user_desc->long_retries =
2078 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
2079 
2080 	ppdu_user_desc->short_retries =
2081 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
2082 	ppdu_user_desc->retry_msdus =
2083 		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
2084 
2085 	ppdu_user_desc->is_ampdu =
2086 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
2087 	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
2088 
2089 }
2090 
2091 /*
2092  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
2093  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2094  * pdev: DP PDEV handle
2095  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2096  * @ppdu_info: per ppdu tlv structure
2097  *
2098  * return:void
2099  */
2100 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2101 		struct dp_pdev *pdev, uint32_t *tag_buf,
2102 		struct ppdu_info *ppdu_info)
2103 {
2104 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2105 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2106 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2107 	struct cdp_tx_completion_ppdu *ppdu_desc;
2108 	uint8_t curr_user_index = 0;
2109 	uint16_t peer_id;
2110 
2111 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2112 
2113 	tag_buf++;
2114 
2115 	peer_id =
2116 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2117 
2118 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2119 		return;
2120 
2121 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2122 
2123 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2124 	ppdu_user_desc->peer_id = peer_id;
2125 
2126 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2127 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2128 			CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2129 }
2130 
2131 /*
2132  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
2133  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2134  * pdev: DP PDEV handle
2135  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2136  * @ppdu_info: per ppdu tlv structure
2137  *
2138  * return:void
2139  */
2140 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2141 		struct dp_pdev *pdev, uint32_t *tag_buf,
2142 		struct ppdu_info *ppdu_info)
2143 {
2144 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2145 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2146 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2147 	struct cdp_tx_completion_ppdu *ppdu_desc;
2148 	uint8_t curr_user_index = 0;
2149 	uint16_t peer_id;
2150 
2151 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2152 
2153 	tag_buf++;
2154 
2155 	peer_id =
2156 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2157 
2158 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2159 		return;
2160 
2161 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2162 
2163 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2164 	ppdu_user_desc->peer_id = peer_id;
2165 
2166 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2167 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2168 			CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2169 }
2170 
2171 /*
2172  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
2173  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2174  * pdev: DP PDE handle
2175  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2176  * @ppdu_info: per ppdu tlv structure
2177  *
2178  * return:void
2179  */
2180 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2181 		struct dp_pdev *pdev, uint32_t *tag_buf,
2182 		struct ppdu_info *ppdu_info)
2183 {
2184 	uint16_t peer_id;
2185 	struct cdp_tx_completion_ppdu *ppdu_desc;
2186 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2187 	uint8_t curr_user_index = 0;
2188 
2189 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2190 
2191 	tag_buf += 2;
2192 	peer_id =
2193 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
2194 
2195 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2196 		return;
2197 
2198 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2199 
2200 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2201 	ppdu_user_desc->peer_id = peer_id;
2202 
2203 	tag_buf++;
2204 	ppdu_user_desc->tid =
2205 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_GET(*tag_buf);
2206 	ppdu_user_desc->num_mpdu =
2207 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
2208 
2209 	ppdu_user_desc->num_msdu =
2210 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
2211 
2212 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
2213 
2214 	tag_buf += 2;
2215 	ppdu_user_desc->success_bytes = *tag_buf;
2216 
2217 }
2218 
2219 /*
2220  * dp_process_ppdu_stats_user_common_array_tlv: Process
2221  * htt_ppdu_stats_user_common_array_tlv
2222  * pdev: DP PDEV handle
2223  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2224  * @ppdu_info: per ppdu tlv structure
2225  *
2226  * return:void
2227  */
2228 static void dp_process_ppdu_stats_user_common_array_tlv(
2229 		struct dp_pdev *pdev, uint32_t *tag_buf,
2230 		struct ppdu_info *ppdu_info)
2231 {
2232 	uint32_t peer_id;
2233 	struct cdp_tx_completion_ppdu *ppdu_desc;
2234 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2235 	uint8_t curr_user_index = 0;
2236 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2237 
2238 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2239 
2240 	tag_buf++;
2241 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2242 	tag_buf += 3;
2243 	peer_id =
2244 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2245 
2246 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
2247 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2248 			"Invalid peer");
2249 		return;
2250 	}
2251 
2252 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2253 
2254 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2255 
2256 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2257 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2258 
2259 	tag_buf++;
2260 
2261 	ppdu_user_desc->success_msdus =
2262 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2263 	ppdu_user_desc->retry_bytes =
2264 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2265 	tag_buf++;
2266 	ppdu_user_desc->failed_msdus =
2267 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
2268 }
2269 
2270 /*
2271  * dp_process_ppdu_stats_flush_tlv: Process
2272  * htt_ppdu_stats_flush_tlv
2273  * @pdev: DP PDEV handle
2274  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
2275  *
2276  * return:void
2277  */
2278 static void dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
2279 						uint32_t *tag_buf)
2280 {
2281 	uint32_t peer_id;
2282 	uint32_t drop_reason;
2283 	uint8_t tid;
2284 	uint32_t num_msdu;
2285 	struct dp_peer *peer;
2286 
2287 	tag_buf++;
2288 	drop_reason = *tag_buf;
2289 
2290 	tag_buf++;
2291 	num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
2292 
2293 	tag_buf++;
2294 	peer_id =
2295 		HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
2296 
2297 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2298 	if (!peer)
2299 		return;
2300 
2301 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
2302 
2303 	if (drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
2304 		DP_STATS_INC(peer, tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
2305 					num_msdu);
2306 	}
2307 
2308 	dp_peer_unref_del_find_by_id(peer);
2309 }
2310 
2311 /*
2312  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
2313  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2314  * @pdev: DP PDEV handle
2315  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2316  * @length: tlv_length
2317  *
2318  * return:QDF_STATUS_SUCCESS if nbuf as to be freed in caller
2319  */
2320 static QDF_STATUS
2321 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
2322 					      qdf_nbuf_t tag_buf,
2323 					      uint32_t ppdu_id)
2324 {
2325 	uint32_t *nbuf_ptr;
2326 	uint8_t trim_size;
2327 
2328 	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
2329 	    (!pdev->bpr_enable))
2330 		return QDF_STATUS_SUCCESS;
2331 
2332 	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
2333 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
2334 		      qdf_nbuf_data(tag_buf));
2335 
2336 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
2337 		return QDF_STATUS_SUCCESS;
2338 
2339 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
2340 			    pdev->mgmtctrl_frm_info.mgmt_buf_len);
2341 
2342 	nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(
2343 				tag_buf, sizeof(ppdu_id));
2344 	*nbuf_ptr = ppdu_id;
2345 
2346 	if (pdev->bpr_enable) {
2347 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
2348 				     tag_buf, HTT_INVALID_PEER,
2349 				     WDI_NO_VAL, pdev->pdev_id);
2350 	}
2351 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2352 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
2353 				     tag_buf, HTT_INVALID_PEER,
2354 				     WDI_NO_VAL, pdev->pdev_id);
2355 	}
2356 
2357 	return QDF_STATUS_E_ALREADY;
2358 }
2359 
2360 /**
2361  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
2362  * @pdev: DP pdev handle
2363  * @tag_buf: TLV buffer
2364  * @tlv_len: length of tlv
2365  * @ppdu_info: per ppdu tlv structure
2366  *
2367  * return: void
2368  */
2369 static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
2370 		uint32_t tlv_len, struct ppdu_info *ppdu_info)
2371 {
2372 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2373 
2374 	switch (tlv_type) {
2375 	case HTT_PPDU_STATS_COMMON_TLV:
2376 		qdf_assert_always(tlv_len ==
2377 				sizeof(htt_ppdu_stats_common_tlv));
2378 		dp_process_ppdu_stats_common_tlv(pdev, tag_buf, ppdu_info);
2379 		break;
2380 	case HTT_PPDU_STATS_USR_COMMON_TLV:
2381 		qdf_assert_always(tlv_len ==
2382 				sizeof(htt_ppdu_stats_user_common_tlv));
2383 		dp_process_ppdu_stats_user_common_tlv(
2384 				pdev, tag_buf, ppdu_info);
2385 		break;
2386 	case HTT_PPDU_STATS_USR_RATE_TLV:
2387 		qdf_assert_always(tlv_len ==
2388 				sizeof(htt_ppdu_stats_user_rate_tlv));
2389 		dp_process_ppdu_stats_user_rate_tlv(pdev, tag_buf, ppdu_info);
2390 		break;
2391 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
2392 		qdf_assert_always(tlv_len ==
2393 				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv));
2394 		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2395 				pdev, tag_buf, ppdu_info);
2396 		break;
2397 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
2398 		qdf_assert_always(tlv_len ==
2399 				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv));
2400 		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2401 				pdev, tag_buf, ppdu_info);
2402 		break;
2403 	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
2404 		qdf_assert_always(tlv_len ==
2405 				sizeof(htt_ppdu_stats_user_cmpltn_common_tlv));
2406 		dp_process_ppdu_stats_user_cmpltn_common_tlv(
2407 				pdev, tag_buf, ppdu_info);
2408 		break;
2409 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
2410 		qdf_assert_always(tlv_len ==
2411 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv));
2412 		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2413 				pdev, tag_buf, ppdu_info);
2414 		break;
2415 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
2416 		qdf_assert_always(tlv_len ==
2417 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv));
2418 		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2419 				pdev, tag_buf, ppdu_info);
2420 		break;
2421 	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
2422 		qdf_assert_always(tlv_len ==
2423 			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv));
2424 		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2425 				pdev, tag_buf, ppdu_info);
2426 		break;
2427 	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
2428 		qdf_assert_always(tlv_len ==
2429 			sizeof(htt_ppdu_stats_usr_common_array_tlv_v));
2430 		dp_process_ppdu_stats_user_common_array_tlv(
2431 				pdev, tag_buf, ppdu_info);
2432 		break;
2433 	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
2434 		qdf_assert_always(tlv_len ==
2435 			sizeof(htt_ppdu_stats_flush_tlv));
2436 		dp_process_ppdu_stats_user_compltn_flush_tlv(
2437 				pdev, tag_buf);
2438 		break;
2439 	default:
2440 		break;
2441 	}
2442 }
2443 
2444 /**
2445  * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
2446  * to upper layer
2447  * @pdev: DP pdev handle
2448  * @ppdu_info: per PPDU TLV descriptor
2449  *
2450  * return: void
2451  */
2452 static
2453 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
2454 			  struct ppdu_info *ppdu_info)
2455 {
2456 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2457 	struct dp_peer *peer = NULL;
2458 	qdf_nbuf_t nbuf;
2459 	uint16_t i;
2460 
2461 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2462 		qdf_nbuf_data(ppdu_info->nbuf);
2463 
2464 	ppdu_desc->num_users = ppdu_info->last_user;
2465 	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
2466 
2467 	for (i = 0; i < ppdu_desc->num_users; i++) {
2468 
2469 		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
2470 		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
2471 
2472 		peer = dp_peer_find_by_id(pdev->soc,
2473 					  ppdu_desc->user[i].peer_id);
2474 		/**
2475 		 * This check is to make sure peer is not deleted
2476 		 * after processing the TLVs.
2477 		 */
2478 		if (!peer)
2479 			continue;
2480 
2481 		if (ppdu_desc->user[i].tid < CDP_DATA_TID_MAX) {
2482 
2483 			dp_tx_stats_update(pdev->soc, peer,
2484 					&ppdu_desc->user[i],
2485 					ppdu_desc->ack_rssi);
2486 
2487 			dp_peer_unref_del_find_by_id(peer);
2488 		}
2489 
2490 		dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]);
2491 	}
2492 
2493 	/*
2494 	 * Remove from the list
2495 	 */
2496 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
2497 	nbuf = ppdu_info->nbuf;
2498 	pdev->list_depth--;
2499 	qdf_mem_free(ppdu_info);
2500 
2501 	qdf_assert_always(nbuf);
2502 
2503 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2504 		qdf_nbuf_data(nbuf);
2505 
2506 	/**
2507 	 * Deliver PPDU stats only for valid (acked) data frames if
2508 	 * sniffer mode is not enabled.
2509 	 * If sniffer mode is enabled, PPDU stats for all frames
2510 	 * including mgmt/control frames should be delivered to upper layer
2511 	 */
2512 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2513 		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc,
2514 				nbuf, HTT_INVALID_PEER,
2515 				WDI_NO_VAL, pdev->pdev_id);
2516 	} else {
2517 		if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
2518 				ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
2519 
2520 			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
2521 					pdev->soc, nbuf, HTT_INVALID_PEER,
2522 					WDI_NO_VAL, pdev->pdev_id);
2523 		} else
2524 			qdf_nbuf_free(nbuf);
2525 	}
2526 	return;
2527 }
2528 
2529 /**
2530  * dp_get_ppdu_desc(): Function to allocate new PPDU status
2531  * desc for new ppdu id
2532  * @pdev: DP pdev handle
2533  * @ppdu_id: PPDU unique identifier
2534  * @tlv_type: TLV type received
2535  *
2536  * return: ppdu_info per ppdu tlv structure
2537  */
2538 static
2539 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
2540 			uint8_t tlv_type)
2541 {
2542 	struct ppdu_info *ppdu_info = NULL;
2543 
2544 	/*
2545 	 * Find ppdu_id node exists or not
2546 	 */
2547 	TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) {
2548 
2549 		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
2550 			break;
2551 		}
2552 	}
2553 
2554 	if (ppdu_info) {
2555 		/**
2556 		 * if we get tlv_type that is already been processed for ppdu,
2557 		 * that means we got a new ppdu with same ppdu id.
2558 		 * Hence Flush the older ppdu
2559 		 */
2560 		if (ppdu_info->tlv_bitmap & (1 << tlv_type))
2561 			dp_ppdu_desc_deliver(pdev, ppdu_info);
2562 		else
2563 			return ppdu_info;
2564 	}
2565 
2566 	/**
2567 	 * Flush the head ppdu descriptor if ppdu desc list reaches max
2568 	 * threshold
2569 	 */
2570 	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
2571 		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
2572 		dp_ppdu_desc_deliver(pdev, ppdu_info);
2573 	}
2574 
2575 	/*
2576 	 * Allocate new ppdu_info node
2577 	 */
2578 	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
2579 	if (!ppdu_info)
2580 		return NULL;
2581 
2582 	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
2583 			sizeof(struct cdp_tx_completion_ppdu), 0, 4,
2584 			TRUE);
2585 	if (!ppdu_info->nbuf) {
2586 		qdf_mem_free(ppdu_info);
2587 		return NULL;
2588 	}
2589 
2590 	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf),
2591 			sizeof(struct cdp_tx_completion_ppdu));
2592 
2593 	if (qdf_nbuf_put_tail(ppdu_info->nbuf,
2594 			sizeof(struct cdp_tx_completion_ppdu)) == NULL) {
2595 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2596 				"No tailroom for HTT PPDU");
2597 		qdf_nbuf_free(ppdu_info->nbuf);
2598 		ppdu_info->nbuf = NULL;
2599 		ppdu_info->last_user = 0;
2600 		qdf_mem_free(ppdu_info);
2601 		return NULL;
2602 	}
2603 
2604 	/**
2605 	 * No lock is needed because all PPDU TLVs are processed in
2606 	 * same context and this list is updated in same context
2607 	 */
2608 	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
2609 			ppdu_info_list_elem);
2610 	pdev->list_depth++;
2611 	return ppdu_info;
2612 }
2613 
2614 /**
2615  * dp_htt_process_tlv(): Function to process each PPDU TLVs
2616  * @pdev: DP pdev handle
2617  * @htt_t2h_msg: HTT target to host message
2618  *
2619  * return: ppdu_info per ppdu tlv structure
2620  */
2621 
2622 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
2623 		qdf_nbuf_t htt_t2h_msg)
2624 {
2625 	uint32_t length;
2626 	uint32_t ppdu_id;
2627 	uint8_t tlv_type;
2628 	uint32_t tlv_length, tlv_bitmap_expected;
2629 	uint8_t *tlv_buf;
2630 	struct ppdu_info *ppdu_info = NULL;
2631 
2632 	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2633 
2634 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
2635 
2636 	msg_word = msg_word + 1;
2637 	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
2638 
2639 
2640 	msg_word = msg_word + 3;
2641 	while (length > 0) {
2642 		tlv_buf = (uint8_t *)msg_word;
2643 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
2644 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
2645 		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
2646 			pdev->stats.ppdu_stats_counter[tlv_type]++;
2647 
2648 		if (tlv_length == 0)
2649 			break;
2650 
2651 		tlv_length += HTT_TLV_HDR_LEN;
2652 
2653 		/**
2654 		 * Not allocating separate ppdu descriptor for MGMT Payload
2655 		 * TLV as this is sent as separate WDI indication and it
2656 		 * doesn't contain any ppdu information
2657 		 */
2658 		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
2659 			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
2660 			pdev->mgmtctrl_frm_info.mgmt_buf_len = tlv_length;
2661 			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
2662 			msg_word =
2663 				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2664 			length -= (tlv_length);
2665 			continue;
2666 		}
2667 
2668 		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type);
2669 		if (!ppdu_info)
2670 			return NULL;
2671 		ppdu_info->ppdu_id = ppdu_id;
2672 		ppdu_info->tlv_bitmap |= (1 << tlv_type);
2673 
2674 		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
2675 
2676 		/**
2677 		 * Increment pdev level tlv count to monitor
2678 		 * missing TLVs
2679 		 */
2680 		pdev->tlv_count++;
2681 		ppdu_info->last_tlv_cnt = pdev->tlv_count;
2682 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2683 		length -= (tlv_length);
2684 	}
2685 
2686 	if (!ppdu_info)
2687 		return NULL;
2688 
2689 	pdev->last_ppdu_id = ppdu_id;
2690 
2691 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
2692 
2693 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2694 		if (ppdu_info->is_ampdu)
2695 			tlv_bitmap_expected = HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP;
2696 	}
2697 
2698 	/**
2699 	 * Once all the TLVs for a given PPDU has been processed,
2700 	 * return PPDU status to be delivered to higher layer
2701 	 */
2702 	if (ppdu_info->tlv_bitmap == tlv_bitmap_expected)
2703 		return ppdu_info;
2704 
2705 	return NULL;
2706 }
2707 #endif /* FEATURE_PERPKT_INFO */
2708 
2709 /**
2710  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
2711  * @soc: DP SOC handle
2712  * @pdev_id: pdev id
2713  * @htt_t2h_msg: HTT message nbuf
2714  *
2715  * return:void
2716  */
2717 #if defined(WDI_EVENT_ENABLE)
2718 #ifdef FEATURE_PERPKT_INFO
2719 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
2720 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
2721 {
2722 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
2723 	struct ppdu_info *ppdu_info = NULL;
2724 	bool free_buf = true;
2725 
2726 	if (!pdev)
2727 		return true;
2728 
2729 	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
2730 	    !pdev->mcopy_mode && !pdev->bpr_enable)
2731 		return free_buf;
2732 
2733 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
2734 
2735 	if (pdev->mgmtctrl_frm_info.mgmt_buf) {
2736 		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
2737 		    (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
2738 		    QDF_STATUS_SUCCESS)
2739 			free_buf = false;
2740 
2741 		pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
2742 		pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
2743 		pdev->mgmtctrl_frm_info.ppdu_id = 0;
2744 	}
2745 
2746 	if (ppdu_info)
2747 		dp_ppdu_desc_deliver(pdev, ppdu_info);
2748 
2749 	return free_buf;
2750 }
2751 #else
/* FEATURE_PERPKT_INFO disabled: no PPDU stats processing, caller frees */
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	return true;
}
2757 #endif
2758 #endif
2759 
2760 /**
2761  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2762  * @soc: DP SOC handle
2763  * @htt_t2h_msg: HTT message nbuf
2764  *
2765  * return:void
2766  */
2767 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
2768 		qdf_nbuf_t htt_t2h_msg)
2769 {
2770 	uint8_t done;
2771 	qdf_nbuf_t msg_copy;
2772 	uint32_t *msg_word;
2773 
2774 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2775 	msg_word = msg_word + 3;
2776 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2777 
2778 	/*
2779 	 * HTT EXT stats response comes as stream of TLVs which span over
2780 	 * multiple T2H messages.
2781 	 * The first message will carry length of the response.
2782 	 * For rest of the messages length will be zero.
2783 	 *
2784 	 * Clone the T2H message buffer and store it in a list to process
2785 	 * it later.
2786 	 *
2787 	 * The original T2H message buffers gets freed in the T2H HTT event
2788 	 * handler
2789 	 */
2790 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
2791 
2792 	if (!msg_copy) {
2793 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2794 				"T2H messge clone failed for HTT EXT STATS");
2795 		goto error;
2796 	}
2797 
2798 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2799 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
2800 	/*
2801 	 * Done bit signifies that this is the last T2H buffer in the stream of
2802 	 * HTT EXT STATS message
2803 	 */
2804 	if (done) {
2805 		soc->htt_stats.num_stats++;
2806 		qdf_sched_work(0, &soc->htt_stats.work);
2807 	}
2808 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2809 
2810 	return;
2811 
2812 error:
2813 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2814 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
2815 			!= NULL) {
2816 		qdf_nbuf_free(msg_copy);
2817 	}
2818 	soc->htt_stats.num_stats = 0;
2819 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2820 	return;
2821 
2822 }
2823 
2824 /*
2825  * htt_soc_attach_target() - SOC level HTT setup
2826  * @htt_soc:	HTT SOC handle
2827  *
2828  * Return: 0 on success; error code on failure
2829  */
2830 int htt_soc_attach_target(void *htt_soc)
2831 {
2832 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
2833 
2834 	return htt_h2t_ver_req_msg(soc);
2835 }
2836 
2837 
2838 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
2839 /*
2840  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
2841  * @htt_soc:	 HTT SOC handle
2842  * @msg_word:    Pointer to payload
2843  * @htt_t2h_msg: HTT msg nbuf
2844  *
2845  * Return: True if buffer should be freed by caller.
2846  */
2847 static bool
2848 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
2849 				uint32_t *msg_word,
2850 				qdf_nbuf_t htt_t2h_msg)
2851 {
2852 	u_int8_t pdev_id;
2853 	bool free_buf;
2854 	qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
2855 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2856 		"received HTT_T2H_MSG_TYPE_PPDU_STATS_IND");
2857 	pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
2858 	pdev_id = DP_HW2SW_MACID(pdev_id);
2859 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
2860 					      htt_t2h_msg);
2861 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
2862 		htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
2863 		pdev_id);
2864 	return free_buf;
2865 }
2866 #else
static bool
dp_ppdu_stats_ind_handler(struct htt_soc *soc,
				uint32_t *msg_word,
				qdf_nbuf_t htt_t2h_msg)
{
	/*
	 * PPDU stats support compiled out: no handler takes ownership of
	 * the T2H buffer, so tell the caller to free it.
	 */
	return true;
}
2874 #endif
2875 
2876 #if defined(WDI_EVENT_ENABLE) && \
2877 		!defined(REMOVE_PKT_LOG) && defined(CONFIG_WIN)
2878 /*
2879  * dp_pktlog_msg_handler() - Pktlog msg handler
2880  * @htt_soc:	 HTT SOC handle
2881  * @msg_word:    Pointer to payload
2882  *
2883  * Return: None
2884  */
2885 static void
2886 dp_pktlog_msg_handler(struct htt_soc *soc,
2887 				uint32_t *msg_word)
2888 {
2889 	uint8_t pdev_id;
2890 	uint32_t *pl_hdr;
2891 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2892 		"received HTT_T2H_MSG_TYPE_PKTLOG");
2893 	pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
2894 	pdev_id = DP_HW2SW_MACID(pdev_id);
2895 	pl_hdr = (msg_word + 1);
2896 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
2897 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
2898 		pdev_id);
2899 }
2900 #else
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
				uint32_t *msg_word)
{
	/* Pktlog support compiled out: silently drop the message. */
}
2906 #endif
2907 
2908 /*
2909  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
2910  * @context:	Opaque context (HTT SOC handle)
2911  * @pkt:	HTC packet
2912  */
2913 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
2914 {
2915 	struct htt_soc *soc = (struct htt_soc *) context;
2916 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
2917 	u_int32_t *msg_word;
2918 	enum htt_t2h_msg_type msg_type;
2919 	bool free_buf = true;
2920 
2921 	/* check for successful message reception */
2922 	if (pkt->Status != QDF_STATUS_SUCCESS) {
2923 		if (pkt->Status != QDF_STATUS_E_CANCELED)
2924 			soc->stats.htc_err_cnt++;
2925 
2926 		qdf_nbuf_free(htt_t2h_msg);
2927 		return;
2928 	}
2929 
2930 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
2931 
2932 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
2933 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
2934 	switch (msg_type) {
2935 	case HTT_T2H_MSG_TYPE_PEER_MAP:
2936 		{
2937 			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
2938 			u_int8_t *peer_mac_addr;
2939 			u_int16_t peer_id;
2940 			u_int16_t hw_peer_id;
2941 			u_int8_t vdev_id;
2942 			u_int8_t is_wds;
2943 			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
2944 
2945 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
2946 			hw_peer_id =
2947 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
2948 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
2949 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
2950 				(u_int8_t *) (msg_word+1),
2951 				&mac_addr_deswizzle_buf[0]);
2952 			QDF_TRACE(QDF_MODULE_ID_TXRX,
2953 				QDF_TRACE_LEVEL_INFO,
2954 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
2955 				peer_id, vdev_id);
2956 
2957 			/*
2958 			 * check if peer already exists for this peer_id, if so
2959 			 * this peer map event is in response for a wds peer add
2960 			 * wmi command sent during wds source port learning.
2961 			 * in this case just add the ast entry to the existing
2962 			 * peer ast_list.
2963 			 */
2964 			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
2965 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
2966 					       vdev_id, peer_mac_addr, 0,
2967 					       is_wds);
2968 			break;
2969 		}
2970 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
2971 		{
2972 			u_int16_t peer_id;
2973 			u_int8_t vdev_id;
2974 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
2975 			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
2976 
2977 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
2978 						 vdev_id, NULL, 0);
2979 			break;
2980 		}
2981 	case HTT_T2H_MSG_TYPE_SEC_IND:
2982 		{
2983 			u_int16_t peer_id;
2984 			enum cdp_sec_type sec_type;
2985 			int is_unicast;
2986 
2987 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
2988 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
2989 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
2990 			/* point to the first part of the Michael key */
2991 			msg_word++;
2992 			dp_rx_sec_ind_handler(
2993 				soc->dp_soc, peer_id, sec_type, is_unicast,
2994 				msg_word, msg_word + 2);
2995 			break;
2996 		}
2997 
2998 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
2999 		{
3000 			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
3001 							     htt_t2h_msg);
3002 			break;
3003 		}
3004 
3005 	case HTT_T2H_MSG_TYPE_PKTLOG:
3006 		{
3007 			dp_pktlog_msg_handler(soc, msg_word);
3008 			break;
3009 		}
3010 
3011 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
3012 		{
3013 			htc_pm_runtime_put(soc->htc_soc);
3014 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
3015 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
3016 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
3017 				"target uses HTT version %d.%d; host uses %d.%d",
3018 				soc->tgt_ver.major, soc->tgt_ver.minor,
3019 				HTT_CURRENT_VERSION_MAJOR,
3020 				HTT_CURRENT_VERSION_MINOR);
3021 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
3022 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3023 					QDF_TRACE_LEVEL_ERROR,
3024 					"*** Incompatible host/target HTT versions!");
3025 			}
3026 			/* abort if the target is incompatible with the host */
3027 			qdf_assert(soc->tgt_ver.major ==
3028 				HTT_CURRENT_VERSION_MAJOR);
3029 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
3030 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3031 					QDF_TRACE_LEVEL_WARN,
3032 					"*** Warning: host/target HTT versions"
3033 					" are different, though compatible!");
3034 			}
3035 			break;
3036 		}
3037 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
3038 		{
3039 			uint16_t peer_id;
3040 			uint8_t tid;
3041 			uint8_t win_sz;
3042 			uint16_t status;
3043 			struct dp_peer *peer;
3044 
3045 			/*
3046 			 * Update REO Queue Desc with new values
3047 			 */
3048 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
3049 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
3050 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
3051 			peer = dp_peer_find_by_id(soc->dp_soc, peer_id);
3052 
3053 			/*
3054 			 * Window size needs to be incremented by 1
3055 			 * since fw needs to represent a value of 256
3056 			 * using just 8 bits
3057 			 */
3058 			if (peer) {
3059 				status = dp_addba_requestprocess_wifi3(peer,
3060 						0, tid, 0, win_sz + 1, 0xffff);
3061 
3062 				/*
3063 				 * If PEER_LOCK_REF_PROTECT enbled dec ref
3064 				 * which is inc by dp_peer_find_by_id
3065 				 */
3066 				dp_peer_unref_del_find_by_id(peer);
3067 
3068 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3069 					QDF_TRACE_LEVEL_INFO,
3070 					FL("PeerID %d BAW %d TID %d stat %d"),
3071 					peer_id, win_sz, tid, status);
3072 
3073 			} else {
3074 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3075 					QDF_TRACE_LEVEL_ERROR,
3076 					FL("Peer not found peer id %d"),
3077 					peer_id);
3078 			}
3079 			break;
3080 		}
3081 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
3082 		{
3083 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
3084 			break;
3085 		}
3086 	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
3087 		{
3088 			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
3089 			u_int8_t *peer_mac_addr;
3090 			u_int16_t peer_id;
3091 			u_int16_t hw_peer_id;
3092 			u_int8_t vdev_id;
3093 			bool is_wds;
3094 			u_int16_t ast_hash;
3095 
3096 			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
3097 			hw_peer_id =
3098 			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
3099 			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
3100 			peer_mac_addr =
3101 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3102 						   &mac_addr_deswizzle_buf[0]);
3103 			is_wds =
3104 			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
3105 			ast_hash =
3106 			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
3107 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3108 				  QDF_TRACE_LEVEL_INFO,
3109 				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3110 				  peer_id, vdev_id);
3111 
3112 			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
3113 					       hw_peer_id, vdev_id,
3114 					       peer_mac_addr, ast_hash,
3115 					       is_wds);
3116 			break;
3117 		}
3118 	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
3119 		{
3120 			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
3121 			u_int8_t *peer_mac_addr;
3122 			u_int16_t peer_id;
3123 			u_int8_t vdev_id;
3124 			u_int8_t is_wds;
3125 
3126 			peer_id =
3127 			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
3128 			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
3129 			peer_mac_addr =
3130 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3131 						   &mac_addr_deswizzle_buf[0]);
3132 			is_wds =
3133 			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
3134 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3135 				  QDF_TRACE_LEVEL_INFO,
3136 				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3137 				  peer_id, vdev_id);
3138 
3139 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3140 						 vdev_id, peer_mac_addr,
3141 						 is_wds);
3142 			break;
3143 		}
3144 	default:
3145 		break;
3146 	};
3147 
3148 	/* Free the indication buffer */
3149 	if (free_buf)
3150 		qdf_nbuf_free(htt_t2h_msg);
3151 }
3152 
3153 /*
3154  * dp_htt_h2t_full() - Send full handler (called from HTC)
3155  * @context:	Opaque context (HTT SOC handle)
3156  * @pkt:	HTC packet
3157  *
3158  * Return: enum htc_send_full_action
3159  */
3160 static enum htc_send_full_action
3161 dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
3162 {
3163 	return HTC_SEND_FULL_KEEP;
3164 }
3165 
3166 /*
3167  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
3168  * @context:	Opaque context (HTT SOC handle)
3169  * @nbuf:	nbuf containing T2H message
3170  * @pipe_id:	HIF pipe ID
3171  *
3172  * Return: QDF_STATUS
3173  *
3174  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3175  * will be used for packet log and other high-priority HTT messages. Proper
3176  * HTC connection to be added later once required FW changes are available
3177  */
3178 static QDF_STATUS
3179 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3180 {
3181 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
3182 	HTC_PACKET htc_pkt;
3183 
3184 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3185 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3186 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3187 	htc_pkt.pPktContext = (void *)nbuf;
3188 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3189 
3190 	return rc;
3191 }
3192 
3193 /*
3194  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
3195  * @htt_soc:	HTT SOC handle
3196  *
3197  * Return: 0 on success; error code on failure
3198  */
3199 static int
3200 htt_htc_soc_attach(struct htt_soc *soc)
3201 {
3202 	struct htc_service_connect_req connect;
3203 	struct htc_service_connect_resp response;
3204 	A_STATUS status;
3205 	struct dp_soc *dpsoc = soc->dp_soc;
3206 
3207 	qdf_mem_set(&connect, sizeof(connect), 0);
3208 	qdf_mem_set(&response, sizeof(response), 0);
3209 
3210 	connect.pMetaData = NULL;
3211 	connect.MetaDataLength = 0;
3212 	connect.EpCallbacks.pContext = soc;
3213 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
3214 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3215 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
3216 
3217 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
3218 	connect.EpCallbacks.EpRecvRefill = NULL;
3219 
3220 	/* N/A, fill is done by HIF */
3221 	connect.EpCallbacks.RecvRefillWaterMark = 1;
3222 
3223 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
3224 	/*
3225 	 * Specify how deep to let a queue get before htc_send_pkt will
3226 	 * call the EpSendFull function due to excessive send queue depth.
3227 	 */
3228 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
3229 
3230 	/* disable flow control for HTT data message service */
3231 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
3232 
3233 	/* connect to control service */
3234 	connect.service_id = HTT_DATA_MSG_SVC;
3235 
3236 	status = htc_connect_service(soc->htc_soc, &connect, &response);
3237 
3238 	if (status != A_OK)
3239 		return QDF_STATUS_E_FAILURE;
3240 
3241 	soc->htc_endpoint = response.Endpoint;
3242 
3243 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
3244 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
3245 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
3246 
3247 	return 0; /* success */
3248 }
3249 
3250 /*
3251  * htt_soc_attach() - SOC level HTT initialization
3252  * @dp_soc:	Opaque Data path SOC handle
3253  * @ctrl_psoc:	Opaque ctrl SOC handle
3254  * @htc_soc:	SOC level HTC handle
3255  * @hal_soc:	Opaque HAL SOC handle
3256  * @osdev:	QDF device
3257  *
3258  * Return: HTT handle on success; NULL on failure
3259  */
3260 void *
3261 htt_soc_attach(void *dp_soc, void *ctrl_psoc, HTC_HANDLE htc_soc,
3262 	void *hal_soc, qdf_device_t osdev)
3263 {
3264 	struct htt_soc *soc;
3265 	int i;
3266 
3267 	soc = qdf_mem_malloc(sizeof(*soc));
3268 
3269 	if (!soc)
3270 		goto fail1;
3271 
3272 	soc->osdev = osdev;
3273 	soc->ctrl_psoc = ctrl_psoc;
3274 	soc->dp_soc = dp_soc;
3275 	soc->htc_soc = htc_soc;
3276 	soc->hal_soc = hal_soc;
3277 
3278 	/* TODO: See if any NSS related context is required in htt_soc */
3279 
3280 	soc->htt_htc_pkt_freelist = NULL;
3281 
3282 	if (htt_htc_soc_attach(soc))
3283 		goto fail2;
3284 
3285 	/* TODO: See if any Rx data specific intialization is required. For
3286 	 * MCL use cases, the data will be received as single packet and
3287 	 * should not required any descriptor or reorder handling
3288 	 */
3289 
3290 	HTT_TX_MUTEX_INIT(&soc->htt_tx_mutex);
3291 
3292 	/* pre-allocate some HTC_PACKET objects */
3293 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3294 		struct dp_htt_htc_pkt_union *pkt;
3295 		pkt = qdf_mem_malloc(sizeof(*pkt));
3296 		if (!pkt)
3297 			break;
3298 
3299 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3300 	}
3301 
3302 	return soc;
3303 
3304 fail2:
3305 	qdf_mem_free(soc);
3306 
3307 fail1:
3308 	return NULL;
3309 }
3310 
3311 
3312 /*
3313  * htt_soc_detach() - Detach SOC level HTT
3314  * @htt_soc:	HTT SOC handle
3315  */
3316 void
3317 htt_soc_detach(void *htt_soc)
3318 {
3319 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3320 
3321 	htt_htc_misc_pkt_pool_free(soc);
3322 	htt_htc_pkt_pool_free(soc);
3323 	HTT_TX_MUTEX_DESTROY(&soc->htt_tx_mutex);
3324 	qdf_mem_free(soc);
3325 }
3326 
3327 /**
3328  * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW
3329  * @pdev: DP PDEV handle
3330  * @stats_type_upload_mask: stats type requested by user
3331  * @config_param_0: extra configuration parameters
3332  * @config_param_1: extra configuration parameters
3333  * @config_param_2: extra configuration parameters
3334  * @config_param_3: extra configuration parameters
3335  * @mac_id: mac number
3336  *
3337  * return: QDF STATUS
3338  */
3339 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
3340 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
3341 		uint32_t config_param_1, uint32_t config_param_2,
3342 		uint32_t config_param_3, int cookie_val, int cookie_msb,
3343 		uint8_t mac_id)
3344 {
3345 	struct htt_soc *soc = pdev->soc->htt_handle;
3346 	struct dp_htt_htc_pkt *pkt;
3347 	qdf_nbuf_t msg;
3348 	uint32_t *msg_word;
3349 	uint8_t pdev_mask = 0;
3350 
3351 	msg = qdf_nbuf_alloc(
3352 			soc->osdev,
3353 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
3354 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
3355 
3356 	if (!msg)
3357 		return QDF_STATUS_E_NOMEM;
3358 
3359 	/*TODO:Add support for SOC stats
3360 	 * Bit 0: SOC Stats
3361 	 * Bit 1: Pdev stats for pdev id 0
3362 	 * Bit 2: Pdev stats for pdev id 1
3363 	 * Bit 3: Pdev stats for pdev id 2
3364 	 */
3365 	mac_id = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
3366 
3367 	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
3368 	/*
3369 	 * Set the length of the message.
3370 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3371 	 * separately during the below call to qdf_nbuf_push_head.
3372 	 * The contribution from the HTC header is added separately inside HTC.
3373 	 */
3374 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
3375 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3376 				"Failed to expand head for HTT_EXT_STATS");
3377 		qdf_nbuf_free(msg);
3378 		return QDF_STATUS_E_FAILURE;
3379 	}
3380 
3381 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3382 		"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
3383 		"config_param_1 %u\n config_param_2 %u\n"
3384 		"config_param_4 %u\n -------------",
3385 		__func__, __LINE__, cookie_val, config_param_0,
3386 		config_param_1, config_param_2,	config_param_3);
3387 
3388 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3389 
3390 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3391 	*msg_word = 0;
3392 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
3393 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
3394 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
3395 
3396 	/* word 1 */
3397 	msg_word++;
3398 	*msg_word = 0;
3399 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
3400 
3401 	/* word 2 */
3402 	msg_word++;
3403 	*msg_word = 0;
3404 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
3405 
3406 	/* word 3 */
3407 	msg_word++;
3408 	*msg_word = 0;
3409 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
3410 
3411 	/* word 4 */
3412 	msg_word++;
3413 	*msg_word = 0;
3414 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
3415 
3416 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
3417 
3418 	/* word 5 */
3419 	msg_word++;
3420 
3421 	/* word 6 */
3422 	msg_word++;
3423 	*msg_word = 0;
3424 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
3425 
3426 	/* word 7 */
3427 	msg_word++;
3428 	*msg_word = 0;
3429 	/*Using last 2 bits for pdev_id */
3430 	cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
3431 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
3432 
3433 	pkt = htt_htc_pkt_alloc(soc);
3434 	if (!pkt) {
3435 		qdf_nbuf_free(msg);
3436 		return QDF_STATUS_E_NOMEM;
3437 	}
3438 
3439 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3440 
3441 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
3442 			dp_htt_h2t_send_complete_free_netbuf,
3443 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
3444 			soc->htc_endpoint,
3445 			1); /* tag - not relevant here */
3446 
3447 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3448 	DP_HTT_SEND_HTC_PKT(soc, pkt);
3449 	return 0;
3450 }
3451 
/* Remove this workaround once a proper definition for
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG is available in the HTT header (htt.h).
 */
3455 #if defined(WDI_EVENT_ENABLE)
3456 /**
3457  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
3458  * @pdev: DP PDEV handle
3459  * @stats_type_upload_mask: stats type requested by user
3460  * @mac_id: Mac id number
3461  *
3462  * return: QDF STATUS
3463  */
3464 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
3465 		uint32_t stats_type_upload_mask, uint8_t mac_id)
3466 {
3467 	struct htt_soc *soc = pdev->soc->htt_handle;
3468 	struct dp_htt_htc_pkt *pkt;
3469 	qdf_nbuf_t msg;
3470 	uint32_t *msg_word;
3471 	uint8_t pdev_mask;
3472 
3473 	msg = qdf_nbuf_alloc(
3474 			soc->osdev,
3475 			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
3476 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
3477 
3478 	if (!msg) {
3479 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3480 		"Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer");
3481 		qdf_assert(0);
3482 		return QDF_STATUS_E_NOMEM;
3483 	}
3484 
3485 	/*TODO:Add support for SOC stats
3486 	 * Bit 0: SOC Stats
3487 	 * Bit 1: Pdev stats for pdev id 0
3488 	 * Bit 2: Pdev stats for pdev id 1
3489 	 * Bit 3: Pdev stats for pdev id 2
3490 	 */
3491 	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
3492 
3493 	/*
3494 	 * Set the length of the message.
3495 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3496 	 * separately during the below call to qdf_nbuf_push_head.
3497 	 * The contribution from the HTC header is added separately inside HTC.
3498 	 */
3499 	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
3500 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3501 				"Failed to expand head for HTT_CFG_STATS");
3502 		qdf_nbuf_free(msg);
3503 		return QDF_STATUS_E_FAILURE;
3504 	}
3505 
3506 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3507 
3508 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3509 	*msg_word = 0;
3510 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
3511 	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
3512 	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
3513 			stats_type_upload_mask);
3514 
3515 	pkt = htt_htc_pkt_alloc(soc);
3516 	if (!pkt) {
3517 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3518 				"Fail to allocate dp_htt_htc_pkt buffer");
3519 		qdf_assert(0);
3520 		qdf_nbuf_free(msg);
3521 		return QDF_STATUS_E_NOMEM;
3522 	}
3523 
3524 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3525 
3526 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
3527 			dp_htt_h2t_send_complete_free_netbuf,
3528 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
3529 			soc->htc_endpoint,
3530 			1); /* tag - not relevant here */
3531 
3532 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3533 	DP_HTT_SEND_HTC_PKT(soc, pkt);
3534 	return 0;
3535 }
3536 #endif
3537