xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision 1397a33f48ea6455be40871470b286e535820eb8)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <htt.h>
20 #include <hal_hw_headers.h>
21 #include <hal_api.h>
22 #include "dp_htt.h"
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_rx_mon.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
30 #include "cdp_txrx_cmn_struct.h"
31 
32 #ifdef FEATURE_PERPKT_INFO
33 #include "dp_ratetable.h"
34 #endif
35 
/* TLV header length, in bytes, of T2H extended-stats conf messages */
#define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE

/* number of HTC packet containers pre-allocated at attach time */
#define HTT_HTC_PKT_POOL_INIT_SIZE 64
/* upper bound, in bytes, on a single target-to-host HTT message */
#define HTT_T2H_MAX_MSG_SIZE 2048

/* total nbuf size needed for an H2T message with msg_bytes of payload,
 * leaving room for the HTC header and its alignment padding
 */
#define HTT_MSG_BUF_SIZE(msg_bytes) \
	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)

#define HTT_PID_BIT_MASK 0x3

#define DP_EXT_MSG_LENGTH 2048
/*
 * Hand an HTT packet to HTC; on successful send the packet is parked on the
 * misclist so it stays alive until trimmed (see htt_htc_misc_pkt_list_add).
 * NOTE(review): on htc_send_pkt() failure the packet is neither freed nor
 * tracked here — confirm against the HTC contract who owns it in that case.
 */
#define DP_HTT_SEND_HTC_PKT(soc, pkt)                            \
do {                                                             \
	if (htc_send_pkt(soc->htc_soc, &pkt->htc_pkt) ==         \
					QDF_STATUS_SUCCESS)      \
		htt_htc_misc_pkt_list_add(soc, pkt);             \
} while (0)

/* bytes reserved ahead of the mgmt-ctrl TLV payload (spelling per FW API) */
#define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
55 
/**
 * Bitmap of HTT PPDU TLV types for Default mode
 *
 * Fully parenthesized so the macro expands safely inside any surrounding
 * expression (e.g. `x & HTT_PPDU_DEFAULT_TLV_BITMAP`); without the outer
 * parentheses, `&` would bind to the first term only (CERT PRE01-C).
 */
#define HTT_PPDU_DEFAULT_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))

/**
 * Bitmap of HTT PPDU TLV types for Sniffer mode
 *
 * Superset of the default bitmap: additionally requests the BA and MPDU
 * enqueue bitmap TLVs needed for AMPDU sniffing. Fully parenthesized for
 * the same reason as above.
 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV))

#define HTT_FRAMECTRL_DATATYPE 0x08
#define HTT_PPDU_DESC_MAX_DEPTH 16
#define DP_SCAN_PEER_ID 0xFFFF
84 
85 /*
86  * dp_tx_stats_update() - Update per-peer statistics
87  * @soc: Datapath soc handle
88  * @peer: Datapath peer handle
89  * @ppdu: PPDU Descriptor
90  * @ack_rssi: RSSI of last ack received
91  *
92  * Return: None
93  */
94 #ifdef FEATURE_PERPKT_INFO
95 static inline void
96 dp_tx_rate_stats_update(struct dp_peer *peer,
97 			struct cdp_tx_completion_ppdu_user *ppdu)
98 {
99 	uint32_t ratekbps = 0;
100 	uint32_t ppdu_tx_rate = 0;
101 
102 	if (!peer || !ppdu)
103 		return;
104 
105 
106 	ratekbps = dp_getrateindex(ppdu->gi,
107 				   ppdu->mcs,
108 				   ppdu->nss,
109 				   ppdu->preamble,
110 				   ppdu->bw);
111 
112 	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);
113 
114 	if (!ratekbps)
115 		return;
116 
117 	peer->stats.tx.avg_tx_rate =
118 		dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
119 	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
120 	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
121 
122 	if (peer->vdev) {
123 		if (peer->bss_peer) {
124 			peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
125 			peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
126 		} else {
127 			peer->vdev->stats.tx.last_tx_rate = ratekbps;
128 			peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
129 		}
130 	}
131 }
132 
static void dp_tx_stats_update(struct dp_soc *soc, struct dp_peer *peer,
		struct cdp_tx_completion_ppdu_user *ppdu, uint32_t ack_rssi)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	uint8_t preamble, mcs;
	uint16_t num_msdu;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;
	num_msdu = ppdu->num_msdu;

	/* If the peer statistics are already processed as part of
	 * per-MSDU completion handler, do not process these again in per-PPDU
	 * indications */
	if (soc->process_tx_status)
		return;

	/* aggregate completed MSDU count and total bytes over all outcomes
	 * (success + retry + failure) for this PPDU
	 */
	DP_STATS_INC_PKT(peer, tx.comp_pkt,
			num_msdu, (ppdu->success_bytes +
				ppdu->retry_bytes + ppdu->failed_bytes));
	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
	/* per-GI / bandwidth / NSS / WME-access-category histograms, all
	 * weighted by the number of MSDUs in this PPDU
	 */
	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], num_msdu);
	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
	/* ack RSSI is only meaningful for unicast frames with a valid ack */
	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);

	DP_STATS_INC(peer, tx.retries,
			(ppdu->long_retries + ppdu->short_retries));
	/* per-preamble MCS histogram: for each preamble type exactly one of
	 * the paired INCC conditions fires; MCS values at/above the legal
	 * maximum for that preamble are clamped into the MAX_MCS-1 bucket
	 */
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));

	/* let registered listeners know this peer's stats changed */
	dp_peer_stats_notify(peer);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	/* forward the updated peer stats through the WDI event framework */
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer->stats, ppdu->peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
#endif
}
205 #endif
206 
207 /*
208  * htt_htc_pkt_alloc() - Allocate HTC packet buffer
209  * @htt_soc:	HTT SOC handle
210  *
211  * Return: Pointer to htc packet buffer
212  */
213 static struct dp_htt_htc_pkt *
214 htt_htc_pkt_alloc(struct htt_soc *soc)
215 {
216 	struct dp_htt_htc_pkt_union *pkt = NULL;
217 
218 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
219 	if (soc->htt_htc_pkt_freelist) {
220 		pkt = soc->htt_htc_pkt_freelist;
221 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
222 	}
223 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
224 
225 	if (pkt == NULL)
226 		pkt = qdf_mem_malloc(sizeof(*pkt));
227 	return &pkt->u.pkt; /* not actually a dereference */
228 }
229 
230 /*
231  * htt_htc_pkt_free() - Free HTC packet buffer
232  * @htt_soc:	HTT SOC handle
233  */
234 static void
235 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
236 {
237 	struct dp_htt_htc_pkt_union *u_pkt =
238 		(struct dp_htt_htc_pkt_union *)pkt;
239 
240 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
241 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
242 	soc->htt_htc_pkt_freelist = u_pkt;
243 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
244 }
245 
246 /*
247  * htt_htc_pkt_pool_free() - Free HTC packet pool
248  * @htt_soc:	HTT SOC handle
249  */
250 static void
251 htt_htc_pkt_pool_free(struct htt_soc *soc)
252 {
253 	struct dp_htt_htc_pkt_union *pkt, *next;
254 	pkt = soc->htt_htc_pkt_freelist;
255 	while (pkt) {
256 		next = pkt->u.next;
257 		qdf_mem_free(pkt);
258 		pkt = next;
259 	}
260 	soc->htt_htc_pkt_freelist = NULL;
261 }
262 
263 /*
264  * htt_htc_misc_pkt_list_trim() - trim misc list
265  * @htt_soc: HTT SOC handle
266  * @level: max no. of pkts in list
267  */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list*/
		if (++i > level) {
			/* node is beyond the allowed depth: unmap and free
			 * its netbuf, then the container; every node after
			 * this one is freed on subsequent iterations
			 */
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			pkt = NULL;
			/* cut the list at the last surviving node; prev is
			 * NULL on later iterations, but the list was already
			 * terminated when the first node was trimmed
			 */
			if (prev)
				prev->u.next = NULL;
		}
		/* prev becomes NULL once trimming starts (pkt was cleared),
		 * which keeps the termination above a one-time action
		 */
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
295 
296 /*
297  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
298  * @htt_soc:	HTT SOC handle
299  * @dp_htt_htc_pkt: pkt to be added to list
300  */
301 static void
302 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
303 {
304 	struct dp_htt_htc_pkt_union *u_pkt =
305 				(struct dp_htt_htc_pkt_union *)pkt;
306 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
307 							pkt->htc_pkt.Endpoint)
308 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
309 
310 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
311 	if (soc->htt_htc_pkt_misclist) {
312 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
313 		soc->htt_htc_pkt_misclist = u_pkt;
314 	} else {
315 		soc->htt_htc_pkt_misclist = u_pkt;
316 	}
317 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
318 
319 	/* only ce pipe size + tx_queue_depth could possibly be in use
320 	 * free older packets in the misclist
321 	 */
322 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
323 }
324 
325 /*
326  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
327  * @htt_soc:	HTT SOC handle
328  */
329 static void
330 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
331 {
332 	struct dp_htt_htc_pkt_union *pkt, *next;
333 	qdf_nbuf_t netbuf;
334 
335 	pkt = soc->htt_htc_pkt_misclist;
336 
337 	while (pkt) {
338 		next = pkt->u.next;
339 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
340 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
341 
342 		soc->stats.htc_pkt_free++;
343 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
344 			 "%s: Pkt free count %d",
345 			 __func__, soc->stats.htc_pkt_free);
346 
347 		qdf_nbuf_free(netbuf);
348 		qdf_mem_free(pkt);
349 		pkt = next;
350 	}
351 	soc->htt_htc_pkt_misclist = NULL;
352 }
353 
354 /*
355  * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
356  * @tgt_mac_addr:	Target MAC
357  * @buffer:		Output buffer
358  */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	int i;

	/*
	 * The host endianness is opposite of the target endianness.
	 * The target->host upload byte-swapped each u_int32_t of the
	 * message, so byte-array fields like the MAC address arrive in
	 * the wrong order and must be un-swizzled: reverse the 4 bytes
	 * of the first word, then the 2 used bytes of the second word.
	 */
	for (i = 0; i < 4; i++)
		buffer[i] = tgt_mac_addr[3 - i];
	buffer[4] = tgt_mac_addr[7];
	buffer[5] = tgt_mac_addr[6];
	return buffer;
#else
	/*
	 * Host and target endianness match; the MAC address can be read
	 * straight out of the message buffer.
	 */
	return tgt_mac_addr;
#endif
}
387 
388 /*
389  * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
390  * @soc:	SOC handle
391  * @status:	Completion status
392  * @netbuf:	HTT buffer
393  */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	/* unconditionally release the message buffer; soc and status are
	 * ignored — this is the "always free" send-done policy callback
	 */
	qdf_nbuf_free(netbuf);
}
400 
401 /*
402  * dp_htt_h2t_send_complete() - H2T completion handler
403  * @context:	Opaque context (HTT SOC handle)
404  * @htc_pkt:	HTC packet
405  */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	void (*send_complete_part2)(
		void *soc, A_STATUS status, qdf_nbuf_t msdu);
	struct htt_soc *soc =  (struct htt_soc *) context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	/* per-packet completion hook stashed in pPktContext at send time */
	send_complete_part2 = htc_pkt->pPktContext;

	/* recover our wrapper struct from the embedded HTC_PACKET */
	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	 */
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	if (send_complete_part2 != NULL) {
		/* netbuf ownership passes to the part2 callback */
		send_complete_part2(
			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
	}
	/* free the htt_htc_pkt / HTC_PACKET object */
	htt_htc_pkt_free(soc, htt_pkt);
}
432 
433 /*
434  * htt_h2t_ver_req_msg() - Send HTT version request message to target
435  * @htt_soc:	HTT SOC handle
436  *
437  * Return: 0 on success; error code on failure
438  */
439 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
440 {
441 	struct dp_htt_htc_pkt *pkt;
442 	qdf_nbuf_t msg;
443 	uint32_t *msg_word;
444 
445 	msg = qdf_nbuf_alloc(
446 		soc->osdev,
447 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
448 		/* reserve room for the HTC header */
449 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
450 	if (!msg)
451 		return QDF_STATUS_E_NOMEM;
452 
453 	/*
454 	 * Set the length of the message.
455 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
456 	 * separately during the below call to qdf_nbuf_push_head.
457 	 * The contribution from the HTC header is added separately inside HTC.
458 	 */
459 	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
460 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
461 			"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
462 			__func__);
463 		return QDF_STATUS_E_FAILURE;
464 	}
465 
466 	/* fill in the message contents */
467 	msg_word = (u_int32_t *) qdf_nbuf_data(msg);
468 
469 	/* rewind beyond alignment pad to get to the HTC header reserved area */
470 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
471 
472 	*msg_word = 0;
473 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
474 
475 	pkt = htt_htc_pkt_alloc(soc);
476 	if (!pkt) {
477 		qdf_nbuf_free(msg);
478 		return QDF_STATUS_E_FAILURE;
479 	}
480 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
481 
482 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
483 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
484 		qdf_nbuf_len(msg), soc->htc_endpoint,
485 		1); /* tag - not relevant here */
486 
487 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
488 	DP_HTT_SEND_HTC_PKT(soc, pkt);
489 	return 0;
490 }
491 
492 /*
493  * htt_srng_setup() - Send SRNG setup message to target
494  * @htt_soc:	HTT SOC handle
495  * @mac_id:	MAC Id
496  * @hal_srng:	Opaque HAL SRNG pointer
497  * @hal_ring_type:	SRNG ring type
498  *
499  * Return: 0 on success; error code on failure
500  */
501 int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,
502 	int hal_ring_type)
503 {
504 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
505 	struct dp_htt_htc_pkt *pkt;
506 	qdf_nbuf_t htt_msg;
507 	uint32_t *msg_word;
508 	struct hal_srng_params srng_params;
509 	qdf_dma_addr_t hp_addr, tp_addr;
510 	uint32_t ring_entry_size =
511 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
512 	int htt_ring_type, htt_ring_id;
513 
514 	/* Sizes should be set in 4-byte words */
515 	ring_entry_size = ring_entry_size >> 2;
516 
517 	htt_msg = qdf_nbuf_alloc(soc->osdev,
518 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
519 		/* reserve room for the HTC header */
520 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
521 	if (!htt_msg)
522 		goto fail0;
523 
524 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
525 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_srng);
526 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_srng);
527 
528 	switch (hal_ring_type) {
529 	case RXDMA_BUF:
530 #ifdef QCA_HOST2FW_RXBUF_RING
531 		if (srng_params.ring_id ==
532 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
533 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
534 			htt_ring_type = HTT_SW_TO_SW_RING;
535 #ifdef IPA_OFFLOAD
536 		} else if (srng_params.ring_id ==
537 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
538 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
539 			htt_ring_type = HTT_SW_TO_SW_RING;
540 #endif
541 #else
542 		if (srng_params.ring_id ==
543 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
544 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
545 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
546 			htt_ring_type = HTT_SW_TO_HW_RING;
547 #endif
548 		} else if (srng_params.ring_id ==
549 #ifdef IPA_OFFLOAD
550 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
551 #else
552 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
553 #endif
554 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
555 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
556 			htt_ring_type = HTT_SW_TO_HW_RING;
557 		} else {
558 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
559 				   "%s: Ring %d currently not supported",
560 				   __func__, srng_params.ring_id);
561 			goto fail1;
562 		}
563 
564 		dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
565 			hal_ring_type, srng_params.ring_id, htt_ring_id,
566 			(uint64_t)hp_addr,
567 			(uint64_t)tp_addr);
568 		break;
569 	case RXDMA_MONITOR_BUF:
570 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
571 		htt_ring_type = HTT_SW_TO_HW_RING;
572 		break;
573 	case RXDMA_MONITOR_STATUS:
574 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
575 		htt_ring_type = HTT_SW_TO_HW_RING;
576 		break;
577 	case RXDMA_MONITOR_DST:
578 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
579 		htt_ring_type = HTT_HW_TO_SW_RING;
580 		break;
581 	case RXDMA_MONITOR_DESC:
582 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
583 		htt_ring_type = HTT_SW_TO_HW_RING;
584 		break;
585 	case RXDMA_DST:
586 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
587 		htt_ring_type = HTT_HW_TO_SW_RING;
588 		break;
589 
590 	default:
591 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
592 			"%s: Ring currently not supported", __func__);
593 			goto fail1;
594 	}
595 
596 	/*
597 	 * Set the length of the message.
598 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
599 	 * separately during the below call to qdf_nbuf_push_head.
600 	 * The contribution from the HTC header is added separately inside HTC.
601 	 */
602 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
603 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
604 			"%s: Failed to expand head for SRING_SETUP msg",
605 			__func__);
606 		return QDF_STATUS_E_FAILURE;
607 	}
608 
609 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
610 
611 	/* rewind beyond alignment pad to get to the HTC header reserved area */
612 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
613 
614 	/* word 0 */
615 	*msg_word = 0;
616 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
617 
618 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
619 			(htt_ring_type == HTT_HW_TO_SW_RING))
620 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word,
621 			 DP_SW2HW_MACID(mac_id));
622 	else
623 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
624 
625 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
626 		  "%s: mac_id %d", __func__, mac_id);
627 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
628 	/* TODO: Discuss with FW on changing this to unique ID and using
629 	 * htt_ring_type to send the type of ring
630 	 */
631 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
632 
633 	/* word 1 */
634 	msg_word++;
635 	*msg_word = 0;
636 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
637 		srng_params.ring_base_paddr & 0xffffffff);
638 
639 	/* word 2 */
640 	msg_word++;
641 	*msg_word = 0;
642 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
643 		(uint64_t)srng_params.ring_base_paddr >> 32);
644 
645 	/* word 3 */
646 	msg_word++;
647 	*msg_word = 0;
648 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
649 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
650 		(ring_entry_size * srng_params.num_entries));
651 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
652 		  "%s: entry_size %d", __func__,
653 			 ring_entry_size);
654 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
655 		  "%s: num_entries %d", __func__,
656 			 srng_params.num_entries);
657 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
658 		  "%s: ring_size %d", __func__,
659 			 (ring_entry_size * srng_params.num_entries));
660 	if (htt_ring_type == HTT_SW_TO_HW_RING)
661 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
662 						*msg_word, 1);
663 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
664 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
665 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
666 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
667 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
668 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
669 
670 	/* word 4 */
671 	msg_word++;
672 	*msg_word = 0;
673 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
674 		hp_addr & 0xffffffff);
675 
676 	/* word 5 */
677 	msg_word++;
678 	*msg_word = 0;
679 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
680 		(uint64_t)hp_addr >> 32);
681 
682 	/* word 6 */
683 	msg_word++;
684 	*msg_word = 0;
685 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
686 		tp_addr & 0xffffffff);
687 
688 	/* word 7 */
689 	msg_word++;
690 	*msg_word = 0;
691 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
692 		(uint64_t)tp_addr >> 32);
693 
694 	/* word 8 */
695 	msg_word++;
696 	*msg_word = 0;
697 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
698 		srng_params.msi_addr & 0xffffffff);
699 
700 	/* word 9 */
701 	msg_word++;
702 	*msg_word = 0;
703 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
704 		(uint64_t)(srng_params.msi_addr) >> 32);
705 
706 	/* word 10 */
707 	msg_word++;
708 	*msg_word = 0;
709 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
710 		srng_params.msi_data);
711 
712 	/* word 11 */
713 	msg_word++;
714 	*msg_word = 0;
715 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
716 		srng_params.intr_batch_cntr_thres_entries *
717 		ring_entry_size);
718 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
719 		srng_params.intr_timer_thres_us >> 3);
720 
721 	/* word 12 */
722 	msg_word++;
723 	*msg_word = 0;
724 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
725 		/* TODO: Setting low threshold to 1/8th of ring size - see
726 		 * if this needs to be configurable
727 		 */
728 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
729 			srng_params.low_threshold);
730 	}
731 	/* "response_required" field should be set if a HTT response message is
732 	 * required after setting up the ring.
733 	 */
734 	pkt = htt_htc_pkt_alloc(soc);
735 	if (!pkt)
736 		goto fail1;
737 
738 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
739 
740 	SET_HTC_PACKET_INFO_TX(
741 		&pkt->htc_pkt,
742 		dp_htt_h2t_send_complete_free_netbuf,
743 		qdf_nbuf_data(htt_msg),
744 		qdf_nbuf_len(htt_msg),
745 		soc->htc_endpoint,
746 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
747 
748 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
749 	DP_HTT_SEND_HTC_PKT(soc, pkt);
750 
751 	return QDF_STATUS_SUCCESS;
752 
753 fail1:
754 	qdf_nbuf_free(htt_msg);
755 fail0:
756 	return QDF_STATUS_E_FAILURE;
757 }
758 
759 /*
760  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
761  * config message to target
762  * @htt_soc:	HTT SOC handle
763  * @pdev_id:	PDEV Id
764  * @hal_srng:	Opaque HAL SRNG pointer
765  * @hal_ring_type:	SRNG ring type
766  * @ring_buf_size:	SRNG buffer size
767  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
768  * Return: 0 on success; error code on failure
769  */
770 int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng,
771 	int hal_ring_type, int ring_buf_size,
772 	struct htt_rx_ring_tlv_filter *htt_tlv_filter)
773 {
774 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
775 	struct dp_htt_htc_pkt *pkt;
776 	qdf_nbuf_t htt_msg;
777 	uint32_t *msg_word;
778 	struct hal_srng_params srng_params;
779 	uint32_t htt_ring_type, htt_ring_id;
780 	uint32_t tlv_filter;
781 
782 	htt_msg = qdf_nbuf_alloc(soc->osdev,
783 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
784 	/* reserve room for the HTC header */
785 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
786 	if (!htt_msg)
787 		goto fail0;
788 
789 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
790 
791 	switch (hal_ring_type) {
792 	case RXDMA_BUF:
793 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
794 		htt_ring_type = HTT_SW_TO_HW_RING;
795 		break;
796 	case RXDMA_MONITOR_BUF:
797 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
798 		htt_ring_type = HTT_SW_TO_HW_RING;
799 		break;
800 	case RXDMA_MONITOR_STATUS:
801 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
802 		htt_ring_type = HTT_SW_TO_HW_RING;
803 		break;
804 	case RXDMA_MONITOR_DST:
805 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
806 		htt_ring_type = HTT_HW_TO_SW_RING;
807 		break;
808 	case RXDMA_MONITOR_DESC:
809 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
810 		htt_ring_type = HTT_SW_TO_HW_RING;
811 		break;
812 	case RXDMA_DST:
813 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
814 		htt_ring_type = HTT_HW_TO_SW_RING;
815 		break;
816 
817 	default:
818 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
819 			"%s: Ring currently not supported", __func__);
820 		goto fail1;
821 	}
822 
823 	/*
824 	 * Set the length of the message.
825 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
826 	 * separately during the below call to qdf_nbuf_push_head.
827 	 * The contribution from the HTC header is added separately inside HTC.
828 	 */
829 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
830 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
831 			"%s: Failed to expand head for RX Ring Cfg msg",
832 			__func__);
833 		goto fail1; /* failure */
834 	}
835 
836 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
837 
838 	/* rewind beyond alignment pad to get to the HTC header reserved area */
839 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
840 
841 	/* word 0 */
842 	*msg_word = 0;
843 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
844 
845 	/*
846 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
847 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
848 	 */
849 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
850 			htt_ring_type == HTT_SW_TO_HW_RING)
851 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
852 						DP_SW2HW_MACID(pdev_id));
853 
854 	/* TODO: Discuss with FW on changing this to unique ID and using
855 	 * htt_ring_type to send the type of ring
856 	 */
857 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
858 
859 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
860 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
861 
862 	HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
863 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
864 
865 	/* word 1 */
866 	msg_word++;
867 	*msg_word = 0;
868 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
869 		ring_buf_size);
870 
871 	/* word 2 */
872 	msg_word++;
873 	*msg_word = 0;
874 
875 	if (htt_tlv_filter->enable_fp) {
876 		/* TYPE: MGMT */
877 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
878 			FP, MGMT, 0000,
879 			(htt_tlv_filter->fp_mgmt_filter &
880 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
881 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
882 			FP, MGMT, 0001,
883 			(htt_tlv_filter->fp_mgmt_filter &
884 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
885 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
886 			FP, MGMT, 0010,
887 			(htt_tlv_filter->fp_mgmt_filter &
888 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
889 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
890 			FP, MGMT, 0011,
891 			(htt_tlv_filter->fp_mgmt_filter &
892 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
893 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
894 			FP, MGMT, 0100,
895 			(htt_tlv_filter->fp_mgmt_filter &
896 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
897 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
898 			FP, MGMT, 0101,
899 			(htt_tlv_filter->fp_mgmt_filter &
900 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
901 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
902 			FP, MGMT, 0110,
903 			(htt_tlv_filter->fp_mgmt_filter &
904 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
905 		/* reserved */
906 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
907 			MGMT, 0111,
908 			(htt_tlv_filter->fp_mgmt_filter &
909 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
910 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
911 			FP, MGMT, 1000,
912 			(htt_tlv_filter->fp_mgmt_filter &
913 			FILTER_MGMT_BEACON) ? 1 : 0);
914 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
915 			FP, MGMT, 1001,
916 			(htt_tlv_filter->fp_mgmt_filter &
917 			FILTER_MGMT_ATIM) ? 1 : 0);
918 	}
919 
920 	if (htt_tlv_filter->enable_md) {
921 			/* TYPE: MGMT */
922 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
923 			MD, MGMT, 0000,
924 			(htt_tlv_filter->md_mgmt_filter &
925 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
926 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
927 			MD, MGMT, 0001,
928 			(htt_tlv_filter->md_mgmt_filter &
929 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
930 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
931 			MD, MGMT, 0010,
932 			(htt_tlv_filter->md_mgmt_filter &
933 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
934 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
935 			MD, MGMT, 0011,
936 			(htt_tlv_filter->md_mgmt_filter &
937 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
938 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
939 			MD, MGMT, 0100,
940 			(htt_tlv_filter->md_mgmt_filter &
941 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
942 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
943 			MD, MGMT, 0101,
944 			(htt_tlv_filter->md_mgmt_filter &
945 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
946 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
947 			MD, MGMT, 0110,
948 			(htt_tlv_filter->md_mgmt_filter &
949 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
950 		/* reserved */
951 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
952 			MGMT, 0111,
953 			(htt_tlv_filter->md_mgmt_filter &
954 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
955 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
956 			MD, MGMT, 1000,
957 			(htt_tlv_filter->md_mgmt_filter &
958 			FILTER_MGMT_BEACON) ? 1 : 0);
959 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
960 			MD, MGMT, 1001,
961 			(htt_tlv_filter->md_mgmt_filter &
962 			FILTER_MGMT_ATIM) ? 1 : 0);
963 	}
964 
965 	if (htt_tlv_filter->enable_mo) {
966 		/* TYPE: MGMT */
967 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
968 			MO, MGMT, 0000,
969 			(htt_tlv_filter->mo_mgmt_filter &
970 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
971 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
972 			MO, MGMT, 0001,
973 			(htt_tlv_filter->mo_mgmt_filter &
974 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
975 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
976 			MO, MGMT, 0010,
977 			(htt_tlv_filter->mo_mgmt_filter &
978 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
979 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
980 			MO, MGMT, 0011,
981 			(htt_tlv_filter->mo_mgmt_filter &
982 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
983 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
984 			MO, MGMT, 0100,
985 			(htt_tlv_filter->mo_mgmt_filter &
986 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
987 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
988 			MO, MGMT, 0101,
989 			(htt_tlv_filter->mo_mgmt_filter &
990 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
991 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
992 			MO, MGMT, 0110,
993 			(htt_tlv_filter->mo_mgmt_filter &
994 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
995 		/* reserved */
996 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
997 			MGMT, 0111,
998 			(htt_tlv_filter->mo_mgmt_filter &
999 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1000 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1001 			MO, MGMT, 1000,
1002 			(htt_tlv_filter->mo_mgmt_filter &
1003 			FILTER_MGMT_BEACON) ? 1 : 0);
1004 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1005 			MO, MGMT, 1001,
1006 			(htt_tlv_filter->mo_mgmt_filter &
1007 			FILTER_MGMT_ATIM) ? 1 : 0);
1008 	}
1009 
1010 	/* word 3 */
1011 	msg_word++;
1012 	*msg_word = 0;
1013 
1014 	if (htt_tlv_filter->enable_fp) {
1015 		/* TYPE: MGMT */
1016 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1017 			FP, MGMT, 1010,
1018 			(htt_tlv_filter->fp_mgmt_filter &
1019 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1020 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1021 			FP, MGMT, 1011,
1022 			(htt_tlv_filter->fp_mgmt_filter &
1023 			FILTER_MGMT_AUTH) ? 1 : 0);
1024 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1025 			FP, MGMT, 1100,
1026 			(htt_tlv_filter->fp_mgmt_filter &
1027 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1028 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1029 			FP, MGMT, 1101,
1030 			(htt_tlv_filter->fp_mgmt_filter &
1031 			FILTER_MGMT_ACTION) ? 1 : 0);
1032 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1033 			FP, MGMT, 1110,
1034 			(htt_tlv_filter->fp_mgmt_filter &
1035 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1036 		/* reserved*/
1037 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1038 			MGMT, 1111,
1039 			(htt_tlv_filter->fp_mgmt_filter &
1040 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1041 	}
1042 
1043 	if (htt_tlv_filter->enable_md) {
1044 			/* TYPE: MGMT */
1045 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1046 			MD, MGMT, 1010,
1047 			(htt_tlv_filter->md_mgmt_filter &
1048 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1049 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1050 			MD, MGMT, 1011,
1051 			(htt_tlv_filter->md_mgmt_filter &
1052 			FILTER_MGMT_AUTH) ? 1 : 0);
1053 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1054 			MD, MGMT, 1100,
1055 			(htt_tlv_filter->md_mgmt_filter &
1056 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1057 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1058 			MD, MGMT, 1101,
1059 			(htt_tlv_filter->md_mgmt_filter &
1060 			FILTER_MGMT_ACTION) ? 1 : 0);
1061 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1062 			MD, MGMT, 1110,
1063 			(htt_tlv_filter->md_mgmt_filter &
1064 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1065 	}
1066 
1067 	if (htt_tlv_filter->enable_mo) {
1068 		/* TYPE: MGMT */
1069 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1070 			MO, MGMT, 1010,
1071 			(htt_tlv_filter->mo_mgmt_filter &
1072 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1073 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1074 			MO, MGMT, 1011,
1075 			(htt_tlv_filter->mo_mgmt_filter &
1076 			FILTER_MGMT_AUTH) ? 1 : 0);
1077 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1078 			MO, MGMT, 1100,
1079 			(htt_tlv_filter->mo_mgmt_filter &
1080 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1081 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1082 			MO, MGMT, 1101,
1083 			(htt_tlv_filter->mo_mgmt_filter &
1084 			FILTER_MGMT_ACTION) ? 1 : 0);
1085 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1086 			MO, MGMT, 1110,
1087 			(htt_tlv_filter->mo_mgmt_filter &
1088 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1089 		/* reserved*/
1090 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1091 			MGMT, 1111,
1092 			(htt_tlv_filter->mo_mgmt_filter &
1093 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1094 	}
1095 
1096 	/* word 4 */
1097 	msg_word++;
1098 	*msg_word = 0;
1099 
1100 	if (htt_tlv_filter->enable_fp) {
1101 		/* TYPE: CTRL */
1102 		/* reserved */
1103 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1104 			CTRL, 0000,
1105 			(htt_tlv_filter->fp_ctrl_filter &
1106 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1107 		/* reserved */
1108 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1109 			CTRL, 0001,
1110 			(htt_tlv_filter->fp_ctrl_filter &
1111 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1112 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1113 			CTRL, 0010,
1114 			(htt_tlv_filter->fp_ctrl_filter &
1115 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1116 		/* reserved */
1117 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1118 			CTRL, 0011,
1119 			(htt_tlv_filter->fp_ctrl_filter &
1120 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1121 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1122 			CTRL, 0100,
1123 			(htt_tlv_filter->fp_ctrl_filter &
1124 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1125 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1126 			CTRL, 0101,
1127 			(htt_tlv_filter->fp_ctrl_filter &
1128 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1129 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1130 			CTRL, 0110,
1131 			(htt_tlv_filter->fp_ctrl_filter &
1132 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1133 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1134 			CTRL, 0111,
1135 			(htt_tlv_filter->fp_ctrl_filter &
1136 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1137 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1138 			CTRL, 1000,
1139 			(htt_tlv_filter->fp_ctrl_filter &
1140 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1141 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1142 			CTRL, 1001,
1143 			(htt_tlv_filter->fp_ctrl_filter &
1144 			FILTER_CTRL_BA) ? 1 : 0);
1145 	}
1146 
1147 	if (htt_tlv_filter->enable_md) {
1148 		/* TYPE: CTRL */
1149 		/* reserved */
1150 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1151 			CTRL, 0000,
1152 			(htt_tlv_filter->md_ctrl_filter &
1153 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1154 		/* reserved */
1155 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1156 			CTRL, 0001,
1157 			(htt_tlv_filter->md_ctrl_filter &
1158 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1159 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1160 			CTRL, 0010,
1161 			(htt_tlv_filter->md_ctrl_filter &
1162 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1163 		/* reserved */
1164 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1165 			CTRL, 0011,
1166 			(htt_tlv_filter->md_ctrl_filter &
1167 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1168 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1169 			CTRL, 0100,
1170 			(htt_tlv_filter->md_ctrl_filter &
1171 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1172 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1173 			CTRL, 0101,
1174 			(htt_tlv_filter->md_ctrl_filter &
1175 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1176 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1177 			CTRL, 0110,
1178 			(htt_tlv_filter->md_ctrl_filter &
1179 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1180 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1181 			CTRL, 0111,
1182 			(htt_tlv_filter->md_ctrl_filter &
1183 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1184 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1185 			CTRL, 1000,
1186 			(htt_tlv_filter->md_ctrl_filter &
1187 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1188 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1189 			CTRL, 1001,
1190 			(htt_tlv_filter->md_ctrl_filter &
1191 			FILTER_CTRL_BA) ? 1 : 0);
1192 	}
1193 
1194 	if (htt_tlv_filter->enable_mo) {
1195 		/* TYPE: CTRL */
1196 		/* reserved */
1197 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1198 			CTRL, 0000,
1199 			(htt_tlv_filter->mo_ctrl_filter &
1200 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1201 		/* reserved */
1202 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1203 			CTRL, 0001,
1204 			(htt_tlv_filter->mo_ctrl_filter &
1205 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1206 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1207 			CTRL, 0010,
1208 			(htt_tlv_filter->mo_ctrl_filter &
1209 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1210 		/* reserved */
1211 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1212 			CTRL, 0011,
1213 			(htt_tlv_filter->mo_ctrl_filter &
1214 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1215 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1216 			CTRL, 0100,
1217 			(htt_tlv_filter->mo_ctrl_filter &
1218 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1219 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1220 			CTRL, 0101,
1221 			(htt_tlv_filter->mo_ctrl_filter &
1222 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1223 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1224 			CTRL, 0110,
1225 			(htt_tlv_filter->mo_ctrl_filter &
1226 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1227 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1228 			CTRL, 0111,
1229 			(htt_tlv_filter->mo_ctrl_filter &
1230 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1231 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1232 			CTRL, 1000,
1233 			(htt_tlv_filter->mo_ctrl_filter &
1234 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1235 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1236 			CTRL, 1001,
1237 			(htt_tlv_filter->mo_ctrl_filter &
1238 			FILTER_CTRL_BA) ? 1 : 0);
1239 	}
1240 
1241 	/* word 5 */
1242 	msg_word++;
1243 	*msg_word = 0;
1244 	if (htt_tlv_filter->enable_fp) {
1245 		/* TYPE: CTRL */
1246 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1247 			CTRL, 1010,
1248 			(htt_tlv_filter->fp_ctrl_filter &
1249 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1250 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1251 			CTRL, 1011,
1252 			(htt_tlv_filter->fp_ctrl_filter &
1253 			FILTER_CTRL_RTS) ? 1 : 0);
1254 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1255 			CTRL, 1100,
1256 			(htt_tlv_filter->fp_ctrl_filter &
1257 			FILTER_CTRL_CTS) ? 1 : 0);
1258 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1259 			CTRL, 1101,
1260 			(htt_tlv_filter->fp_ctrl_filter &
1261 			FILTER_CTRL_ACK) ? 1 : 0);
1262 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1263 			CTRL, 1110,
1264 			(htt_tlv_filter->fp_ctrl_filter &
1265 			FILTER_CTRL_CFEND) ? 1 : 0);
1266 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1267 			CTRL, 1111,
1268 			(htt_tlv_filter->fp_ctrl_filter &
1269 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1270 		/* TYPE: DATA */
1271 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1272 			DATA, MCAST,
1273 			(htt_tlv_filter->fp_data_filter &
1274 			FILTER_DATA_MCAST) ? 1 : 0);
1275 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1276 			DATA, UCAST,
1277 			(htt_tlv_filter->fp_data_filter &
1278 			FILTER_DATA_UCAST) ? 1 : 0);
1279 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1280 			DATA, NULL,
1281 			(htt_tlv_filter->fp_data_filter &
1282 			FILTER_DATA_NULL) ? 1 : 0);
1283 	}
1284 
1285 	if (htt_tlv_filter->enable_md) {
1286 		/* TYPE: CTRL */
1287 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1288 			CTRL, 1010,
1289 			(htt_tlv_filter->md_ctrl_filter &
1290 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1291 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1292 			CTRL, 1011,
1293 			(htt_tlv_filter->md_ctrl_filter &
1294 			FILTER_CTRL_RTS) ? 1 : 0);
1295 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1296 			CTRL, 1100,
1297 			(htt_tlv_filter->md_ctrl_filter &
1298 			FILTER_CTRL_CTS) ? 1 : 0);
1299 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1300 			CTRL, 1101,
1301 			(htt_tlv_filter->md_ctrl_filter &
1302 			FILTER_CTRL_ACK) ? 1 : 0);
1303 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1304 			CTRL, 1110,
1305 			(htt_tlv_filter->md_ctrl_filter &
1306 			FILTER_CTRL_CFEND) ? 1 : 0);
1307 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1308 			CTRL, 1111,
1309 			(htt_tlv_filter->md_ctrl_filter &
1310 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1311 		/* TYPE: DATA */
1312 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1313 			DATA, MCAST,
1314 			(htt_tlv_filter->md_data_filter &
1315 			FILTER_DATA_MCAST) ? 1 : 0);
1316 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1317 			DATA, UCAST,
1318 			(htt_tlv_filter->md_data_filter &
1319 			FILTER_DATA_UCAST) ? 1 : 0);
1320 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1321 			DATA, NULL,
1322 			(htt_tlv_filter->md_data_filter &
1323 			FILTER_DATA_NULL) ? 1 : 0);
1324 	}
1325 
1326 	if (htt_tlv_filter->enable_mo) {
1327 		/* TYPE: CTRL */
1328 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1329 			CTRL, 1010,
1330 			(htt_tlv_filter->mo_ctrl_filter &
1331 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1332 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1333 			CTRL, 1011,
1334 			(htt_tlv_filter->mo_ctrl_filter &
1335 			FILTER_CTRL_RTS) ? 1 : 0);
1336 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1337 			CTRL, 1100,
1338 			(htt_tlv_filter->mo_ctrl_filter &
1339 			FILTER_CTRL_CTS) ? 1 : 0);
1340 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1341 			CTRL, 1101,
1342 			(htt_tlv_filter->mo_ctrl_filter &
1343 			FILTER_CTRL_ACK) ? 1 : 0);
1344 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1345 			CTRL, 1110,
1346 			(htt_tlv_filter->mo_ctrl_filter &
1347 			FILTER_CTRL_CFEND) ? 1 : 0);
1348 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1349 			CTRL, 1111,
1350 			(htt_tlv_filter->mo_ctrl_filter &
1351 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1352 		/* TYPE: DATA */
1353 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1354 			DATA, MCAST,
1355 			(htt_tlv_filter->mo_data_filter &
1356 			FILTER_DATA_MCAST) ? 1 : 0);
1357 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1358 			DATA, UCAST,
1359 			(htt_tlv_filter->mo_data_filter &
1360 			FILTER_DATA_UCAST) ? 1 : 0);
1361 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1362 			DATA, NULL,
1363 			(htt_tlv_filter->mo_data_filter &
1364 			FILTER_DATA_NULL) ? 1 : 0);
1365 	}
1366 
1367 	/* word 6 */
1368 	msg_word++;
1369 	*msg_word = 0;
1370 	tlv_filter = 0;
1371 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1372 		htt_tlv_filter->mpdu_start);
1373 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1374 		htt_tlv_filter->msdu_start);
1375 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1376 		htt_tlv_filter->packet);
1377 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1378 		htt_tlv_filter->msdu_end);
1379 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1380 		htt_tlv_filter->mpdu_end);
1381 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1382 		htt_tlv_filter->packet_header);
1383 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1384 		htt_tlv_filter->attention);
1385 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1386 		htt_tlv_filter->ppdu_start);
1387 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1388 		htt_tlv_filter->ppdu_end);
1389 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1390 		htt_tlv_filter->ppdu_end_user_stats);
1391 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1392 		PPDU_END_USER_STATS_EXT,
1393 		htt_tlv_filter->ppdu_end_user_stats_ext);
1394 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1395 		htt_tlv_filter->ppdu_end_status_done);
1396 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1397 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1398 		 htt_tlv_filter->header_per_msdu);
1399 
1400 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1401 
1402 	/* "response_required" field should be set if a HTT response message is
1403 	 * required after setting up the ring.
1404 	 */
1405 	pkt = htt_htc_pkt_alloc(soc);
1406 	if (!pkt)
1407 		goto fail1;
1408 
1409 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1410 
1411 	SET_HTC_PACKET_INFO_TX(
1412 		&pkt->htc_pkt,
1413 		dp_htt_h2t_send_complete_free_netbuf,
1414 		qdf_nbuf_data(htt_msg),
1415 		qdf_nbuf_len(htt_msg),
1416 		soc->htc_endpoint,
1417 		1); /* tag - not relevant here */
1418 
1419 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1420 	DP_HTT_SEND_HTC_PKT(soc, pkt);
1421 	return QDF_STATUS_SUCCESS;
1422 
1423 fail1:
1424 	qdf_nbuf_free(htt_msg);
1425 fail0:
1426 	return QDF_STATUS_E_FAILURE;
1427 }
1428 
1429 #if defined(CONFIG_WIN) && WDI_EVENT_ENABLE
1430 /**
1431  * dp_send_htt_stat_resp() - forward one HTT EXT stats segment to WDI consumers
1432  * @htt_stats: stats context tracking the remaining message length
1433  * @soc: DP soc handle
1434  * @htt_msg: network buffer holding the raw T2H stats message
1435  *
1436  * The nbuf is consumed (freed) on success, since the WDI handler makes its
1437  * own copy of the payload before handing it to the application.
1438  *
1439  * Return: QDF_STATUS_SUCCESS once the segment has been delivered and freed
1440  */
1441 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1442 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1443 {
1444 	uint32_t *stats_payload;
1445 	uint32_t target_pdev_id;
1446 	uint32_t segment_len;
1447 
1448 	stats_payload = (uint32_t *)qdf_nbuf_data(htt_msg);
1449 
1450 	/* pdev id sits in the low bits of the cookie MSB (third word) */
1451 	target_pdev_id = stats_payload[2] & HTT_PID_BIT_MASK;
1452 
1453 	/* deliver at most one segment: stats length + 16-byte HTT header */
1454 	segment_len = qdf_min(htt_stats->msg_len + 16,
1455 			      (uint32_t)DP_EXT_MSG_LENGTH);
1456 
1457 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc, stats_payload,
1458 			     segment_len, WDI_NO_VAL, target_pdev_id);
1459 
1460 	/* account for the segment just consumed */
1461 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH)
1462 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1463 
1464 	/* WDI handler copied the payload, so the nbuf can be released now */
1465 	qdf_nbuf_free(htt_msg);
1466 	return QDF_STATUS_SUCCESS;
1467 }
1468 #else
1469 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1470 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1471 {
1472 	/* WDI events not compiled in: caller falls back to local processing */
1473 	return QDF_STATUS_E_NOSUPPORT;
1474 }
1475 #endif
1467 
1468 /**
1469  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1470  * @htt_stats: htt stats info
1471  *
1472  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1473  * contains sub messages which are identified by a TLV header.
1474  * In this function we will process the stream of T2H messages and read all the
1475  * TLV contained in the message.
1476  *
1477 * The following cases have been taken care of
1478  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1479  *		In this case the buffer will contain multiple tlvs.
1480  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1481  *		Only one tlv will be contained in the HTT message and this tag
1482  *		will extend onto the next buffer.
1483  * Case 3: When the buffer is the continuation of the previous message
1484  * Case 4: tlv length is 0. which will indicate the end of message
1485  *
1486  * return: void
1487  */
1488 static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
1489 					struct dp_soc *soc)
1490 {
1491 	htt_tlv_tag_t tlv_type = 0xff;
1492 	qdf_nbuf_t htt_msg = NULL;
1493 	uint32_t *msg_word;
1494 	/* tlv_buf_head/tail: scratch buffer used only when a TLV straddles
1495 	 * two T2H buffers (Cases 2/3); NULL while parsing in place (Case 1).
1496 	 */
1497 	uint8_t *tlv_buf_head = NULL;
1498 	uint8_t *tlv_buf_tail = NULL;
1499 	uint32_t msg_remain_len = 0;
1500 	uint32_t tlv_remain_len = 0;
1501 	uint32_t *tlv_start;
1502 	int cookie_val;
1503 	int cookie_msb;
1504 	int pdev_id;
1505 	bool copy_stats = false;
1506 	struct dp_pdev *pdev;
1507 
1508 	/* Process node in the HTT message queue */
1509 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1510 		!= NULL) {
1511 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1512 		cookie_val = *(msg_word + 1);
1513 		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
1514 					*(msg_word +
1515 					HTT_T2H_EXT_STATS_TLV_START_OFFSET));
1516 
1517 		/* non-zero cookie LSB: try to hand the buffer to the WDI
1518 		 * path; on success that path owns (and frees) the nbuf.
1519 		 */
1520 		if (cookie_val) {
1521 			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
1522 					== QDF_STATUS_SUCCESS) {
1523 				continue;
1524 			}
1525 		}
1526 
1527 		cookie_msb = *(msg_word + 2);
1528 		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1529 		pdev = soc->pdev_list[pdev_id];
1530 
1531 		/* bits above the pdev-id field select copy (vs print) mode */
1532 		if (cookie_msb >> 2) {
1533 			copy_stats = true;
1534 		}
1535 
1536 		/* read 5th word */
1537 		msg_word = msg_word + 4;
1538 		msg_remain_len = qdf_min(htt_stats->msg_len,
1539 				(uint32_t) DP_EXT_MSG_LENGTH);
1540 		/* Keep processing the node till node length is 0 */
1541 		while (msg_remain_len) {
1542 			/*
1543 			 * if message is not a continuation of previous message
1544 			 * read the tlv type and tlv length
1545 			 */
1546 			if (!tlv_buf_head) {
1547 				tlv_type = HTT_STATS_TLV_TAG_GET(
1548 						*msg_word);
1549 				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
1550 						*msg_word);
1551 			}
1552 
1553 			/* Case 4: zero-length TLV marks end of the stream;
1554 			 * drop any partially reassembled TLV and bail out.
1555 			 */
1556 			if (tlv_remain_len == 0) {
1557 				msg_remain_len = 0;
1558 
1559 				if (tlv_buf_head) {
1560 					qdf_mem_free(tlv_buf_head);
1561 					tlv_buf_head = NULL;
1562 					tlv_buf_tail = NULL;
1563 				}
1564 
1565 				goto error;
1566 			}
1567 
1568 			/* TLV length field excludes its own header; add it
1569 			 * once, on first sight of this TLV only.
1570 			 */
1571 			if (!tlv_buf_head)
1572 				tlv_remain_len += HTT_TLV_HDR_LEN;
1573 
1574 			if ((tlv_remain_len <= msg_remain_len)) {
1575 				/* Case 3 */
1576 				if (tlv_buf_head) {
1577 					qdf_mem_copy(tlv_buf_tail,
1578 							(uint8_t *)msg_word,
1579 							tlv_remain_len);
1580 					tlv_start = (uint32_t *)tlv_buf_head;
1581 				} else {
1582 					/* Case 1 */
1583 					tlv_start = msg_word;
1584 				}
1585 
1586 				if (copy_stats)
1587 					dp_htt_stats_copy_tag(pdev, tlv_type, tlv_start);
1588 				else
1589 					dp_htt_stats_print_tag(tlv_type, tlv_start);
1590 
1591 				msg_remain_len -= tlv_remain_len;
1592 
1593 				/* advance past the TLV just consumed */
1594 				msg_word = (uint32_t *)
1595 					(((uint8_t *)msg_word) +
1596 					tlv_remain_len);
1597 
1598 				tlv_remain_len = 0;
1599 
1600 				if (tlv_buf_head) {
1601 					qdf_mem_free(tlv_buf_head);
1602 					tlv_buf_head = NULL;
1603 					tlv_buf_tail = NULL;
1604 				}
1605 
1606 			} else { /* tlv_remain_len > msg_remain_len */
1607 				/* Case 2 & 3 */
1608 				if (!tlv_buf_head) {
1609 					tlv_buf_head = qdf_mem_malloc(
1610 							tlv_remain_len);
1611 
1612 					if (!tlv_buf_head) {
1613 						QDF_TRACE(QDF_MODULE_ID_TXRX,
1614 								QDF_TRACE_LEVEL_ERROR,
1615 								"Alloc failed");
1616 						goto error;
1617 					}
1618 
1619 					tlv_buf_tail = tlv_buf_head;
1620 				}
1621 
1622 				/* stash the partial TLV; the remainder comes
1623 				 * in the next queued buffer
1624 				 */
1625 				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
1626 						msg_remain_len);
1627 				tlv_remain_len -= msg_remain_len;
1628 				tlv_buf_tail += msg_remain_len;
1629 			}
1630 		}
1631 
1632 		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1633 			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1634 		}
1635 
1636 		qdf_nbuf_free(htt_msg);
1637 	}
1638 	return;
1639 
1640 error:
1641 	/* free the buffer in hand, then drain and free the rest of the queue */
1642 	qdf_nbuf_free(htt_msg);
1643 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1644 			!= NULL)
1645 		qdf_nbuf_free(htt_msg);
1646 }
1629 
1630 /**
1631  * htt_t2h_stats_handler() - deferred-work handler for queued HTT EXT stats
1632  * @context: opaque work context, actually the dp_soc handle
1633  *
1634  * Drains one complete stats response (buffers up to and including the one
1635  * carrying the DONE bit) from soc->htt_stats.msg into a local queue, hands
1636  * it to dp_process_htt_stat_msg(), and reschedules itself if additional
1637  * completed responses remain queued.
1638  *
1639  * Return: none
1640  */
1641 void htt_t2h_stats_handler(void *context)
1642 {
1643 	struct dp_soc *soc = (struct dp_soc *)context;
1644 	struct htt_stats_context htt_stats;
1645 	uint32_t *msg_word;
1646 	qdf_nbuf_t htt_msg = NULL;
1647 	uint8_t done;
1648 	uint8_t rem_stats;
1649 
1650 	/* Check soc for NULL before touching it: the previous combined
1651 	 * check read soc->cmn_init_done inside the error trace even when
1652 	 * soc itself was NULL, dereferencing a NULL pointer.
1653 	 */
1654 	if (!soc) {
1655 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1655 			  "soc is NULL");
1656 		return;
1657 	}
1658 
1659 	if (!qdf_atomic_read(&soc->cmn_init_done)) {
1660 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1661 			  "soc: 0x%pK, init_done: %d", soc,
1662 			  qdf_atomic_read(&soc->cmn_init_done));
1663 		return;
1664 	}
1665 
1666 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
1667 	qdf_nbuf_queue_init(&htt_stats.msg);
1668 
1669 	/* pull one completed stats from soc->htt_stats_msg and process */
1670 	qdf_spin_lock_bh(&soc->htt_stats.lock);
1671 	if (!soc->htt_stats.num_stats) {
1672 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
1673 		return;
1674 	}
1675 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
1676 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1677 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
1678 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
1679 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
1680 		/*
1681 		 * Done bit signifies that this is the last T2H buffer in the
1682 		 * stream of HTT EXT STATS message
1683 		 */
1684 		if (done)
1685 			break;
1686 	}
1687 	rem_stats = --soc->htt_stats.num_stats;
1688 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
1689 
1690 	dp_process_htt_stat_msg(&htt_stats, soc);
1691 	/* If there are more stats to process, schedule stats work again */
1692 	if (rem_stats)
1693 		qdf_sched_work(0, &soc->htt_stats.work);
1694 }
1675 
1676 /*
1677  * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU,
1678  * if a new peer id arrives in a PPDU
1679  * @pdev: DP pdev handle
1680  * @peer_id : peer unique identifier
1681  * @ppdu_info: per ppdu tlv structure
1682  *
1683  * return:user index to be populated
1684  */
1685 #ifdef FEATURE_PERPKT_INFO
1686 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
1687 						uint16_t peer_id,
1688 						struct ppdu_info *ppdu_info)
1689 {
1690 	struct cdp_tx_completion_ppdu *ppdu_desc;
1691 	uint8_t idx;
1692 
1693 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1694 
1695 	/* Return the slot already assigned to this peer, if any */
1696 	for (idx = 0; idx < ppdu_info->last_user; idx++) {
1697 		if (ppdu_desc->user[idx].peer_id == peer_id) {
1697 			/* Max users possible is 8 so user array index should
1698 			 * not exceed 7
1699 			 */
1700 			qdf_assert_always(idx <= CDP_MU_MAX_USER_INDEX);
1701 			return idx;
1702 		}
1703 	}
1704 
1705 	/* New peer in this PPDU: hand out the next free slot */
1706 	ppdu_info->last_user++;
1707 	/* Max users possible is 8 so last user should not exceed 8 */
1708 	qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS);
1709 	return ppdu_info->last_user - 1;
1710 }
1715 
1716 /*
1717  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
1718  * pdev: DP pdev handle
1719  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
1720  * @ppdu_info: per ppdu tlv structure
1721  *
1722  * return:void
1723  */
1724 static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
1725 		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
1726 {
1727 	uint16_t fw_frame_type;
1728 	uint16_t chan_freq;
1729 	struct dp_soc *dp_soc_hdl;
1730 	struct cdp_tx_completion_ppdu *desc;
1731 
1732 	desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1733 
1734 	/* word 2 of the TLV: number of users in this PPDU */
1735 	tag_buf += 2;
1736 	desc->num_users = HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
1737 
1738 	/* word 3: firmware frame type, folded into the CDP category */
1739 	tag_buf++;
1740 	fw_frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
1741 
1742 	switch (fw_frame_type) {
1743 	case HTT_STATS_FTYPE_TIDQ_DATA_SU:
1744 	case HTT_STATS_FTYPE_TIDQ_DATA_MU:
1745 		desc->frame_type = CDP_PPDU_FTYPE_DATA;
1746 		break;
1747 	case HTT_STATS_FTYPE_SGEN_MU_BAR:
1748 	case HTT_STATS_FTYPE_SGEN_BAR:
1749 		desc->frame_type = CDP_PPDU_FTYPE_BAR;
1750 		break;
1751 	default:
1752 		desc->frame_type = CDP_PPDU_FTYPE_CTRL;
1753 		break;
1754 	}
1755 
1756 	/* word 5: tx duration */
1757 	tag_buf += 2;
1758 	desc->tx_duration = *tag_buf;
1759 
1760 	/* word 8: PPDU start timestamp */
1761 	tag_buf += 3;
1762 	desc->ppdu_start_timestamp = *tag_buf;
1763 
1764 	desc->ppdu_end_timestamp = desc->ppdu_start_timestamp +
1765 				   desc->tx_duration;
1766 	/* Ack time stamp is same as end time stamp */
1767 	desc->ack_timestamp = desc->ppdu_end_timestamp;
1768 
1769 	/* word 9: channel frequency (MHz); refresh the cached operating
1770 	 * channel via the OL callback only when the frequency changed
1771 	 */
1772 	tag_buf++;
1773 	chan_freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
1774 	if (chan_freq != desc->channel) {
1775 		desc->channel = chan_freq;
1776 		dp_soc_hdl = pdev->soc;
1777 		if (dp_soc_hdl && dp_soc_hdl->cdp_soc.ol_ops->freq_to_channel)
1778 			pdev->operating_channel =
1779 				dp_soc_hdl->cdp_soc.ol_ops->freq_to_channel(
1780 						pdev->ctrl_pdev, chan_freq);
1781 	}
1782 
1783 	desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
1784 }
1772 
1773 /*
1774  * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
1775  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
1776  * @ppdu_info: per ppdu tlv structure
1777  *
1778  * return:void
1779  */
1780 static void dp_process_ppdu_stats_user_common_tlv(
1781 		struct dp_pdev *pdev, uint32_t *tag_buf,
1782 		struct ppdu_info *ppdu_info)
1783 {
1784 	uint16_t peer_id;
1785 	struct cdp_tx_completion_ppdu *ppdu_desc;
1786 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1787 	uint8_t curr_user_index = 0;
1788 
1789 	ppdu_desc =
1790 		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1791 
1792 	/* word 1: sw peer id.
1793 	 * NOTE(review): this uses the USER_RATE_TLV accessor in a
1794 	 * USER_COMMON TLV handler — presumably the two macros share the
1795 	 * same field layout; confirm against htt_ppdu_stats.h.
1796 	 */
1797 	tag_buf++;
1798 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
1799 
1800 	curr_user_index =
1801 		dp_get_ppdu_info_user_index(pdev,
1802 					    peer_id, ppdu_info);
1803 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1804 
1805 	/* Scan-time frames have no real peer; record the vdev instead.
1806 	 * For regular peers, drop the TLV if the peer id is not valid.
1807 	 */
1808 	if (peer_id == DP_SCAN_PEER_ID) {
1809 		ppdu_desc->vdev_id =
1810 			HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
1811 	} else {
1812 		if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
1813 			return;
1814 	}
1815 
1816 	ppdu_user_desc->peer_id = peer_id;
1817 
1818 	/* word 2: delayed-BA flag, mcast flag and MPDUs tried */
1819 	tag_buf++;
1820 
1821 	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
1822 		ppdu_user_desc->delayed_ba = 1;
1823 	}
1824 
1825 	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
1826 		ppdu_user_desc->is_mcast = true;
1827 		ppdu_user_desc->mpdu_tried_mcast =
1828 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
1829 		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
1830 	} else {
1831 		ppdu_user_desc->mpdu_tried_ucast =
1832 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
1833 	}
1834 
1835 	/* word 3: QoS control and frame control fields */
1836 	tag_buf++;
1837 
1838 	ppdu_user_desc->qos_ctrl =
1839 		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
1840 	ppdu_user_desc->frame_ctrl =
1841 		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
1842 	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
1843 
1844 	/* Delayed BA: MPDU counts arrive later in the BA TLV, so clear the
1845 	 * provisional values recorded above.
1846 	 */
1847 	if (ppdu_user_desc->delayed_ba) {
1848 		ppdu_user_desc->mpdu_success = 0;
1849 		ppdu_user_desc->mpdu_tried_mcast = 0;
1850 		ppdu_user_desc->mpdu_tried_ucast = 0;
1851 	}
1852 }
1840 
1841 
1842 /**
1843  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
1844  * @pdev: DP pdev handle
1845  * @tag_buf: T2H message buffer carrying the user rate TLV
1846  * @ppdu_info: per ppdu tlv structure
1847  *
1848  * return:void
1849  */
1850 static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
1851 		uint32_t *tag_buf,
1852 		struct ppdu_info *ppdu_info)
1853 {
1854 	uint16_t peer_id;
1855 	struct dp_peer *peer;
1856 	struct cdp_tx_completion_ppdu *ppdu_desc;
1857 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1858 	uint8_t curr_user_index = 0;
1859 	struct dp_vdev *vdev;
1860 
1861 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1862 
1863 	tag_buf++;
1864 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
1865 
1866 	curr_user_index =
1867 		dp_get_ppdu_info_user_index(pdev,
1868 					    peer_id, ppdu_info);
1869 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1870 	if (peer_id == DP_SCAN_PEER_ID) {
1871 		vdev =
1872 		       dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
1873 							  ppdu_desc->vdev_id);
1874 		if (!vdev)
1875 			return;
1876 		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
1877 			     DP_MAC_ADDR_LEN);
1878 	} else {
1879 		peer = dp_peer_find_by_id(pdev->soc, peer_id);
1880 		if (!peer)
1881 			return;
1882 		qdf_mem_copy(ppdu_user_desc->mac_addr,
1883 			     peer->mac_addr.raw, DP_MAC_ADDR_LEN);
1884 		dp_peer_unref_del_find_by_id(peer);
1885 	}
1886 
1887 	ppdu_user_desc->peer_id = peer_id;
1888 
1889 	ppdu_user_desc->tid =
1890 		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
1891 
1892 	tag_buf += 2;
1893 
1894 	ppdu_user_desc->ru_tones =
1895 		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
1896 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
1897 
1898 	tag_buf += 2;
1899 
1900 	ppdu_user_desc->ppdu_type =
1901 		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
1902 
1903 	tag_buf++;
1904 	ppdu_user_desc->tx_rate = *tag_buf;
1905 
1906 	ppdu_user_desc->ltf_size =
1907 		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
1908 	ppdu_user_desc->stbc =
1909 		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
1910 	ppdu_user_desc->he_re =
1911 		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
1912 	ppdu_user_desc->txbf =
1913 		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
1914 	ppdu_user_desc->bw =
1915 		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
1916 	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
1917 	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
1918 	ppdu_user_desc->preamble =
1919 		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
1920 	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
1921 	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
1922 	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
1923 }
1924 
1925 /*
1926  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
1927  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
1928  * pdev: DP PDEV handle
1929  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
1930  * @ppdu_info: per ppdu tlv structure
1931  *
1932  * return:void
1933  */
1934 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
1935 		struct dp_pdev *pdev, uint32_t *tag_buf,
1936 		struct ppdu_info *ppdu_info)
1937 {
1938 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
1939 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
1940 
1941 	struct cdp_tx_completion_ppdu *ppdu_desc;
1942 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1943 	uint8_t curr_user_index = 0;
1944 	uint16_t peer_id;
1945 
1946 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1947 
1948 	tag_buf++;
1949 
1950 	peer_id =
1951 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
1952 
1953 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
1954 		return;
1955 
1956 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1957 
1958 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1959 	ppdu_user_desc->peer_id = peer_id;
1960 
1961 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
1962 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
1963 					CDP_BA_64_BIT_MAP_SIZE_DWORDS);
1964 }
1965 
1966 /*
1967  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
1968  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
1969  * soc: DP SOC handle
1970  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
1971  * @ppdu_info: per ppdu tlv structure
1972  *
1973  * return:void
1974  */
1975 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
1976 		struct dp_pdev *pdev, uint32_t *tag_buf,
1977 		struct ppdu_info *ppdu_info)
1978 {
1979 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
1980 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
1981 
1982 	struct cdp_tx_completion_ppdu *ppdu_desc;
1983 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1984 	uint8_t curr_user_index = 0;
1985 	uint16_t peer_id;
1986 
1987 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1988 
1989 	tag_buf++;
1990 
1991 	peer_id =
1992 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
1993 
1994 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
1995 		return;
1996 
1997 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1998 
1999 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2000 	ppdu_user_desc->peer_id = peer_id;
2001 
2002 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2003 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2004 					CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2005 }
2006 
2007 /*
2008  * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
2009  * htt_ppdu_stats_user_cmpltn_common_tlv
2010  * soc: DP SOC handle
2011  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
2012  * @ppdu_info: per ppdu tlv structure
2013  *
2014  * return:void
2015  */
2016 static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
2017 		struct dp_pdev *pdev, uint32_t *tag_buf,
2018 		struct ppdu_info *ppdu_info)
2019 {
2020 	uint16_t peer_id;
2021 	struct cdp_tx_completion_ppdu *ppdu_desc;
2022 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2023 	uint8_t curr_user_index = 0;
2024 	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
2025 		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
2026 
2027 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2028 
2029 	tag_buf++;
2030 	peer_id =
2031 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
2032 
2033 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2034 		return;
2035 
2036 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2037 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2038 	ppdu_user_desc->peer_id = peer_id;
2039 
2040 	ppdu_user_desc->completion_status =
2041 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
2042 				*tag_buf);
2043 
2044 	ppdu_user_desc->tid =
2045 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
2046 
2047 
2048 	tag_buf++;
2049 	if (qdf_likely(ppdu_user_desc->completion_status ==
2050 			HTT_PPDU_STATS_USER_STATUS_OK)) {
2051 		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
2052 		ppdu_user_desc->ack_rssi_valid = 1;
2053 	} else {
2054 		ppdu_user_desc->ack_rssi_valid = 0;
2055 	}
2056 
2057 	tag_buf++;
2058 
2059 	ppdu_user_desc->mpdu_success =
2060 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
2061 
2062 	tag_buf++;
2063 
2064 	ppdu_user_desc->long_retries =
2065 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
2066 
2067 	ppdu_user_desc->short_retries =
2068 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
2069 	ppdu_user_desc->retry_msdus =
2070 		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
2071 
2072 	ppdu_user_desc->is_ampdu =
2073 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
2074 	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
2075 
2076 }
2077 
2078 /*
2079  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
2080  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2081  * pdev: DP PDEV handle
2082  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2083  * @ppdu_info: per ppdu tlv structure
2084  *
2085  * return:void
2086  */
2087 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2088 		struct dp_pdev *pdev, uint32_t *tag_buf,
2089 		struct ppdu_info *ppdu_info)
2090 {
2091 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2092 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2093 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2094 	struct cdp_tx_completion_ppdu *ppdu_desc;
2095 	uint8_t curr_user_index = 0;
2096 	uint16_t peer_id;
2097 
2098 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2099 
2100 	tag_buf++;
2101 
2102 	peer_id =
2103 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2104 
2105 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2106 		return;
2107 
2108 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2109 
2110 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2111 	ppdu_user_desc->peer_id = peer_id;
2112 
2113 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2114 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2115 			CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2116 }
2117 
2118 /*
2119  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
2120  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2121  * pdev: DP PDEV handle
2122  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2123  * @ppdu_info: per ppdu tlv structure
2124  *
2125  * return:void
2126  */
2127 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2128 		struct dp_pdev *pdev, uint32_t *tag_buf,
2129 		struct ppdu_info *ppdu_info)
2130 {
2131 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2132 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2133 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2134 	struct cdp_tx_completion_ppdu *ppdu_desc;
2135 	uint8_t curr_user_index = 0;
2136 	uint16_t peer_id;
2137 
2138 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2139 
2140 	tag_buf++;
2141 
2142 	peer_id =
2143 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2144 
2145 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2146 		return;
2147 
2148 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2149 
2150 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2151 	ppdu_user_desc->peer_id = peer_id;
2152 
2153 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2154 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2155 			CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2156 }
2157 
2158 /*
2159  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
2160  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2161  * pdev: DP PDE handle
2162  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2163  * @ppdu_info: per ppdu tlv structure
2164  *
2165  * return:void
2166  */
2167 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2168 		struct dp_pdev *pdev, uint32_t *tag_buf,
2169 		struct ppdu_info *ppdu_info)
2170 {
2171 	uint16_t peer_id;
2172 	struct cdp_tx_completion_ppdu *ppdu_desc;
2173 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2174 	uint8_t curr_user_index = 0;
2175 
2176 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2177 
2178 	tag_buf += 2;
2179 	peer_id =
2180 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
2181 
2182 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2183 		return;
2184 
2185 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2186 
2187 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2188 	ppdu_user_desc->peer_id = peer_id;
2189 
2190 	tag_buf++;
2191 	ppdu_user_desc->tid =
2192 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_GET(*tag_buf);
2193 	ppdu_user_desc->num_mpdu =
2194 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
2195 
2196 	ppdu_user_desc->num_msdu =
2197 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
2198 
2199 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
2200 
2201 	tag_buf += 2;
2202 	ppdu_user_desc->success_bytes = *tag_buf;
2203 
2204 }
2205 
2206 /*
2207  * dp_process_ppdu_stats_user_common_array_tlv: Process
2208  * htt_ppdu_stats_user_common_array_tlv
2209  * pdev: DP PDEV handle
2210  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2211  * @ppdu_info: per ppdu tlv structure
2212  *
2213  * return:void
2214  */
2215 static void dp_process_ppdu_stats_user_common_array_tlv(
2216 		struct dp_pdev *pdev, uint32_t *tag_buf,
2217 		struct ppdu_info *ppdu_info)
2218 {
2219 	uint32_t peer_id;
2220 	struct cdp_tx_completion_ppdu *ppdu_desc;
2221 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2222 	uint8_t curr_user_index = 0;
2223 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2224 
2225 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2226 
2227 	tag_buf++;
2228 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2229 	tag_buf += 3;
2230 	peer_id =
2231 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2232 
2233 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
2234 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2235 			"Invalid peer");
2236 		return;
2237 	}
2238 
2239 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2240 
2241 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2242 
2243 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2244 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2245 
2246 	tag_buf++;
2247 
2248 	ppdu_user_desc->success_msdus =
2249 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2250 	ppdu_user_desc->retry_bytes =
2251 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2252 	tag_buf++;
2253 	ppdu_user_desc->failed_msdus =
2254 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
2255 }
2256 
2257 /*
2258  * dp_process_ppdu_stats_flush_tlv: Process
2259  * htt_ppdu_stats_flush_tlv
2260  * @pdev: DP PDEV handle
2261  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
2262  *
2263  * return:void
2264  */
2265 static void dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
2266 						uint32_t *tag_buf)
2267 {
2268 	uint32_t peer_id;
2269 	uint32_t drop_reason;
2270 	uint8_t tid;
2271 	uint32_t num_msdu;
2272 	struct dp_peer *peer;
2273 
2274 	tag_buf++;
2275 	drop_reason = *tag_buf;
2276 
2277 	tag_buf++;
2278 	num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
2279 
2280 	tag_buf++;
2281 	peer_id =
2282 		HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
2283 
2284 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2285 	if (!peer)
2286 		return;
2287 
2288 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
2289 
2290 	if (drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
2291 		DP_STATS_INC(peer, tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
2292 					num_msdu);
2293 	}
2294 
2295 	dp_peer_unref_del_find_by_id(peer);
2296 }
2297 
2298 /*
2299  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
2300  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2301  * @pdev: DP PDEV handle
2302  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2303  * @length: tlv_length
2304  *
2305  * return:QDF_STATUS_SUCCESS if nbuf as to be freed in caller
2306  */
2307 static QDF_STATUS
2308 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
2309 					      qdf_nbuf_t tag_buf,
2310 					      uint32_t ppdu_id)
2311 {
2312 	uint32_t *nbuf_ptr;
2313 	uint8_t trim_size;
2314 
2315 	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
2316 	    (!pdev->bpr_enable))
2317 		return QDF_STATUS_SUCCESS;
2318 
2319 	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
2320 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
2321 		      qdf_nbuf_data(tag_buf));
2322 
2323 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
2324 		return QDF_STATUS_SUCCESS;
2325 
2326 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
2327 			    pdev->mgmtctrl_frm_info.mgmt_buf_len);
2328 
2329 	nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(
2330 				tag_buf, sizeof(ppdu_id));
2331 	*nbuf_ptr = ppdu_id;
2332 
2333 	if (pdev->bpr_enable) {
2334 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
2335 				     tag_buf, HTT_INVALID_PEER,
2336 				     WDI_NO_VAL, pdev->pdev_id);
2337 	}
2338 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2339 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
2340 				     tag_buf, HTT_INVALID_PEER,
2341 				     WDI_NO_VAL, pdev->pdev_id);
2342 	}
2343 
2344 	return QDF_STATUS_E_ALREADY;
2345 }
2346 
2347 /**
2348  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
2349  * @pdev: DP pdev handle
2350  * @tag_buf: TLV buffer
2351  * @tlv_len: length of tlv
2352  * @ppdu_info: per ppdu tlv structure
2353  *
2354  * return: void
2355  */
2356 static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
2357 		uint32_t tlv_len, struct ppdu_info *ppdu_info)
2358 {
2359 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2360 
2361 	switch (tlv_type) {
2362 	case HTT_PPDU_STATS_COMMON_TLV:
2363 		qdf_assert_always(tlv_len >=
2364 				sizeof(htt_ppdu_stats_common_tlv));
2365 		dp_process_ppdu_stats_common_tlv(pdev, tag_buf, ppdu_info);
2366 		break;
2367 	case HTT_PPDU_STATS_USR_COMMON_TLV:
2368 		qdf_assert_always(tlv_len >=
2369 				sizeof(htt_ppdu_stats_user_common_tlv));
2370 		dp_process_ppdu_stats_user_common_tlv(
2371 				pdev, tag_buf, ppdu_info);
2372 		break;
2373 	case HTT_PPDU_STATS_USR_RATE_TLV:
2374 		qdf_assert_always(tlv_len >=
2375 				sizeof(htt_ppdu_stats_user_rate_tlv));
2376 		dp_process_ppdu_stats_user_rate_tlv(pdev, tag_buf, ppdu_info);
2377 		break;
2378 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
2379 		qdf_assert_always(tlv_len >=
2380 				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv));
2381 		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2382 				pdev, tag_buf, ppdu_info);
2383 		break;
2384 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
2385 		qdf_assert_always(tlv_len >=
2386 				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv));
2387 		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2388 				pdev, tag_buf, ppdu_info);
2389 		break;
2390 	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
2391 		qdf_assert_always(tlv_len >=
2392 				sizeof(htt_ppdu_stats_user_cmpltn_common_tlv));
2393 		dp_process_ppdu_stats_user_cmpltn_common_tlv(
2394 				pdev, tag_buf, ppdu_info);
2395 		break;
2396 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
2397 		qdf_assert_always(tlv_len >=
2398 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv));
2399 		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2400 				pdev, tag_buf, ppdu_info);
2401 		break;
2402 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
2403 		qdf_assert_always(tlv_len >=
2404 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv));
2405 		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2406 				pdev, tag_buf, ppdu_info);
2407 		break;
2408 	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
2409 		qdf_assert_always(tlv_len >=
2410 			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv));
2411 		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2412 				pdev, tag_buf, ppdu_info);
2413 		break;
2414 	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
2415 		qdf_assert_always(tlv_len >=
2416 			sizeof(htt_ppdu_stats_usr_common_array_tlv_v));
2417 		dp_process_ppdu_stats_user_common_array_tlv(
2418 				pdev, tag_buf, ppdu_info);
2419 		break;
2420 	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
2421 		qdf_assert_always(tlv_len >=
2422 			sizeof(htt_ppdu_stats_flush_tlv));
2423 		dp_process_ppdu_stats_user_compltn_flush_tlv(
2424 				pdev, tag_buf);
2425 		break;
2426 	default:
2427 		break;
2428 	}
2429 }
2430 
2431 /**
2432  * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
2433  * to upper layer
2434  * @pdev: DP pdev handle
2435  * @ppdu_info: per PPDU TLV descriptor
2436  *
2437  * return: void
2438  */
2439 static
2440 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
2441 			  struct ppdu_info *ppdu_info)
2442 {
2443 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2444 	struct dp_peer *peer = NULL;
2445 	qdf_nbuf_t nbuf;
2446 	uint16_t i;
2447 	uint32_t tlv_bitmap_expected;
2448 
2449 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2450 		qdf_nbuf_data(ppdu_info->nbuf);
2451 
2452 	ppdu_desc->num_users = ppdu_info->last_user;
2453 	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
2454 
2455 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
2456 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2457 		if (ppdu_info->is_ampdu)
2458 			tlv_bitmap_expected = HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP;
2459 	}
2460 	for (i = 0; i < ppdu_desc->num_users; i++) {
2461 
2462 		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
2463 		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
2464 
2465 		peer = dp_peer_find_by_id(pdev->soc,
2466 					  ppdu_desc->user[i].peer_id);
2467 		/**
2468 		 * This check is to make sure peer is not deleted
2469 		 * after processing the TLVs.
2470 		 */
2471 		if (!peer)
2472 			continue;
2473 
2474 		if (ppdu_info->tlv_bitmap != tlv_bitmap_expected) {
2475 			dp_peer_unref_del_find_by_id(peer);
2476 			continue;
2477 		}
2478 		if (ppdu_desc->user[i].tid < CDP_DATA_TID_MAX) {
2479 
2480 			dp_tx_stats_update(pdev->soc, peer,
2481 					&ppdu_desc->user[i],
2482 					ppdu_desc->ack_rssi);
2483 		}
2484 
2485 		dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]);
2486 		dp_peer_unref_del_find_by_id(peer);
2487 	}
2488 
2489 	/*
2490 	 * Remove from the list
2491 	 */
2492 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
2493 	nbuf = ppdu_info->nbuf;
2494 	pdev->list_depth--;
2495 	qdf_mem_free(ppdu_info);
2496 
2497 	qdf_assert_always(nbuf);
2498 
2499 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2500 		qdf_nbuf_data(nbuf);
2501 
2502 	/**
2503 	 * Deliver PPDU stats only for valid (acked) data frames if
2504 	 * sniffer mode is not enabled.
2505 	 * If sniffer mode is enabled, PPDU stats for all frames
2506 	 * including mgmt/control frames should be delivered to upper layer
2507 	 */
2508 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2509 		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc,
2510 				nbuf, HTT_INVALID_PEER,
2511 				WDI_NO_VAL, pdev->pdev_id);
2512 	} else {
2513 		if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
2514 				ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
2515 
2516 			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
2517 					pdev->soc, nbuf, HTT_INVALID_PEER,
2518 					WDI_NO_VAL, pdev->pdev_id);
2519 		} else
2520 			qdf_nbuf_free(nbuf);
2521 	}
2522 	return;
2523 }
2524 
2525 /**
2526  * dp_get_ppdu_desc(): Function to allocate new PPDU status
2527  * desc for new ppdu id
2528  * @pdev: DP pdev handle
2529  * @ppdu_id: PPDU unique identifier
2530  * @tlv_type: TLV type received
2531  *
2532  * return: ppdu_info per ppdu tlv structure
2533  */
2534 static
2535 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
2536 			uint8_t tlv_type)
2537 {
2538 	struct ppdu_info *ppdu_info = NULL;
2539 
2540 	/*
2541 	 * Find ppdu_id node exists or not
2542 	 */
2543 	TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) {
2544 
2545 		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
2546 			break;
2547 		}
2548 	}
2549 
2550 	if (ppdu_info) {
2551 		/**
2552 		 * if we get tlv_type that is already been processed for ppdu,
2553 		 * that means we got a new ppdu with same ppdu id.
2554 		 * Hence Flush the older ppdu
2555 		 */
2556 		if (ppdu_info->tlv_bitmap & (1 << tlv_type))
2557 			dp_ppdu_desc_deliver(pdev, ppdu_info);
2558 		else
2559 			return ppdu_info;
2560 	}
2561 
2562 	/**
2563 	 * Flush the head ppdu descriptor if ppdu desc list reaches max
2564 	 * threshold
2565 	 */
2566 	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
2567 		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
2568 		dp_ppdu_desc_deliver(pdev, ppdu_info);
2569 	}
2570 
2571 	/*
2572 	 * Allocate new ppdu_info node
2573 	 */
2574 	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
2575 	if (!ppdu_info)
2576 		return NULL;
2577 
2578 	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
2579 			sizeof(struct cdp_tx_completion_ppdu), 0, 4,
2580 			TRUE);
2581 	if (!ppdu_info->nbuf) {
2582 		qdf_mem_free(ppdu_info);
2583 		return NULL;
2584 	}
2585 
2586 	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf),
2587 			sizeof(struct cdp_tx_completion_ppdu));
2588 
2589 	if (qdf_nbuf_put_tail(ppdu_info->nbuf,
2590 			sizeof(struct cdp_tx_completion_ppdu)) == NULL) {
2591 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2592 				"No tailroom for HTT PPDU");
2593 		qdf_nbuf_free(ppdu_info->nbuf);
2594 		ppdu_info->nbuf = NULL;
2595 		ppdu_info->last_user = 0;
2596 		qdf_mem_free(ppdu_info);
2597 		return NULL;
2598 	}
2599 
2600 	/**
2601 	 * No lock is needed because all PPDU TLVs are processed in
2602 	 * same context and this list is updated in same context
2603 	 */
2604 	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
2605 			ppdu_info_list_elem);
2606 	pdev->list_depth++;
2607 	return ppdu_info;
2608 }
2609 
2610 /**
2611  * dp_htt_process_tlv(): Function to process each PPDU TLVs
2612  * @pdev: DP pdev handle
2613  * @htt_t2h_msg: HTT target to host message
2614  *
2615  * return: ppdu_info per ppdu tlv structure
2616  */
2617 
2618 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
2619 		qdf_nbuf_t htt_t2h_msg)
2620 {
2621 	uint32_t length;
2622 	uint32_t ppdu_id;
2623 	uint8_t tlv_type;
2624 	uint32_t tlv_length, tlv_bitmap_expected;
2625 	uint8_t *tlv_buf;
2626 	struct ppdu_info *ppdu_info = NULL;
2627 
2628 	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2629 
2630 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
2631 
2632 	msg_word = msg_word + 1;
2633 	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
2634 
2635 
2636 	msg_word = msg_word + 3;
2637 	while (length > 0) {
2638 		tlv_buf = (uint8_t *)msg_word;
2639 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
2640 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
2641 		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
2642 			pdev->stats.ppdu_stats_counter[tlv_type]++;
2643 
2644 		if (tlv_length == 0)
2645 			break;
2646 
2647 		tlv_length += HTT_TLV_HDR_LEN;
2648 
2649 		/**
2650 		 * Not allocating separate ppdu descriptor for MGMT Payload
2651 		 * TLV as this is sent as separate WDI indication and it
2652 		 * doesn't contain any ppdu information
2653 		 */
2654 		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
2655 			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
2656 			pdev->mgmtctrl_frm_info.mgmt_buf_len = tlv_length;
2657 			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
2658 			msg_word =
2659 				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2660 			length -= (tlv_length);
2661 			continue;
2662 		}
2663 
2664 		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type);
2665 		if (!ppdu_info)
2666 			return NULL;
2667 		ppdu_info->ppdu_id = ppdu_id;
2668 		ppdu_info->tlv_bitmap |= (1 << tlv_type);
2669 
2670 		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
2671 
2672 		/**
2673 		 * Increment pdev level tlv count to monitor
2674 		 * missing TLVs
2675 		 */
2676 		pdev->tlv_count++;
2677 		ppdu_info->last_tlv_cnt = pdev->tlv_count;
2678 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2679 		length -= (tlv_length);
2680 	}
2681 
2682 	if (!ppdu_info)
2683 		return NULL;
2684 
2685 	pdev->last_ppdu_id = ppdu_id;
2686 
2687 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
2688 
2689 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2690 		if (ppdu_info->is_ampdu)
2691 			tlv_bitmap_expected = HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP;
2692 	}
2693 
2694 	/**
2695 	 * Once all the TLVs for a given PPDU has been processed,
2696 	 * return PPDU status to be delivered to higher layer
2697 	 */
2698 	if (ppdu_info->tlv_bitmap == tlv_bitmap_expected)
2699 		return ppdu_info;
2700 
2701 	return NULL;
2702 }
2703 #endif /* FEATURE_PERPKT_INFO */
2704 
2705 /**
2706  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
2707  * @soc: DP SOC handle
2708  * @pdev_id: pdev id
2709  * @htt_t2h_msg: HTT message nbuf
2710  *
2711  * return:void
2712  */
2713 #if defined(WDI_EVENT_ENABLE)
2714 #ifdef FEATURE_PERPKT_INFO
2715 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
2716 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
2717 {
2718 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
2719 	struct ppdu_info *ppdu_info = NULL;
2720 	bool free_buf = true;
2721 
2722 	if (!pdev)
2723 		return true;
2724 
2725 	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
2726 	    !pdev->mcopy_mode && !pdev->bpr_enable)
2727 		return free_buf;
2728 
2729 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
2730 
2731 	if (pdev->mgmtctrl_frm_info.mgmt_buf) {
2732 		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
2733 		    (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
2734 		    QDF_STATUS_SUCCESS)
2735 			free_buf = false;
2736 
2737 		pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
2738 		pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
2739 		pdev->mgmtctrl_frm_info.ppdu_id = 0;
2740 	}
2741 
2742 	if (ppdu_info)
2743 		dp_ppdu_desc_deliver(pdev, ppdu_info);
2744 
2745 	return free_buf;
2746 }
2747 #else
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	/* FEATURE_PERPKT_INFO disabled: always let the caller free nbuf */
	return true;
}
2753 #endif
2754 #endif
2755 
2756 /**
2757  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2758  * @soc: DP SOC handle
2759  * @htt_t2h_msg: HTT message nbuf
2760  *
2761  * return:void
2762  */
2763 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
2764 		qdf_nbuf_t htt_t2h_msg)
2765 {
2766 	uint8_t done;
2767 	qdf_nbuf_t msg_copy;
2768 	uint32_t *msg_word;
2769 
2770 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2771 	msg_word = msg_word + 3;
2772 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2773 
2774 	/*
2775 	 * HTT EXT stats response comes as stream of TLVs which span over
2776 	 * multiple T2H messages.
2777 	 * The first message will carry length of the response.
2778 	 * For rest of the messages length will be zero.
2779 	 *
2780 	 * Clone the T2H message buffer and store it in a list to process
2781 	 * it later.
2782 	 *
2783 	 * The original T2H message buffers gets freed in the T2H HTT event
2784 	 * handler
2785 	 */
2786 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
2787 
2788 	if (!msg_copy) {
2789 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2790 				"T2H messge clone failed for HTT EXT STATS");
2791 		goto error;
2792 	}
2793 
2794 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2795 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
2796 	/*
2797 	 * Done bit signifies that this is the last T2H buffer in the stream of
2798 	 * HTT EXT STATS message
2799 	 */
2800 	if (done) {
2801 		soc->htt_stats.num_stats++;
2802 		qdf_sched_work(0, &soc->htt_stats.work);
2803 	}
2804 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2805 
2806 	return;
2807 
2808 error:
2809 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2810 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
2811 			!= NULL) {
2812 		qdf_nbuf_free(msg_copy);
2813 	}
2814 	soc->htt_stats.num_stats = 0;
2815 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2816 	return;
2817 
2818 }
2819 
2820 /*
2821  * htt_soc_attach_target() - SOC level HTT setup
2822  * @htt_soc:	HTT SOC handle
2823  *
2824  * Return: 0 on success; error code on failure
2825  */
2826 int htt_soc_attach_target(void *htt_soc)
2827 {
2828 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
2829 
2830 	return htt_h2t_ver_req_msg(soc);
2831 }
2832 
2833 
2834 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
2835 /*
2836  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
2837  * @htt_soc:	 HTT SOC handle
2838  * @msg_word:    Pointer to payload
2839  * @htt_t2h_msg: HTT msg nbuf
2840  *
2841  * Return: True if buffer should be freed by caller.
2842  */
2843 static bool
2844 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
2845 				uint32_t *msg_word,
2846 				qdf_nbuf_t htt_t2h_msg)
2847 {
2848 	u_int8_t pdev_id;
2849 	bool free_buf;
2850 	qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
2851 	dp_debug("received HTT_T2H_MSG_TYPE_PPDU_STATS_IND");
2852 	pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
2853 	pdev_id = DP_HW2SW_MACID(pdev_id);
2854 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
2855 					      htt_t2h_msg);
2856 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
2857 		htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
2858 		pdev_id);
2859 	return free_buf;
2860 }
2861 #else
static bool
dp_ppdu_stats_ind_handler(struct htt_soc *soc,
				uint32_t *msg_word,
				qdf_nbuf_t htt_t2h_msg)
{
	/* PPDU stats compiled out: always tell the caller to free the nbuf */
	return true;
}
2869 #endif
2870 
2871 #if defined(WDI_EVENT_ENABLE) && \
2872 	!defined(REMOVE_PKT_LOG)
2873 /*
2874  * dp_pktlog_msg_handler() - Pktlog msg handler
2875  * @htt_soc:	 HTT SOC handle
2876  * @msg_word:    Pointer to payload
2877  *
2878  * Return: None
2879  */
2880 static void
2881 dp_pktlog_msg_handler(struct htt_soc *soc,
2882 		      uint32_t *msg_word)
2883 {
2884 	uint8_t pdev_id;
2885 	uint32_t *pl_hdr;
2886 
2887 	dp_debug("received HTT_T2H_MSG_TYPE_PKTLOG");
2888 	pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
2889 	pdev_id = DP_HW2SW_MACID(pdev_id);
2890 	pl_hdr = (msg_word + 1);
2891 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
2892 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
2893 		pdev_id);
2894 }
2895 #else
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
	/* Pktlog compiled out (REMOVE_PKT_LOG / no WDI): drop the message */
}
2901 #endif
2902 /*
2903  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
2904  * @context:	Opaque context (HTT SOC handle)
2905  * @pkt:	HTC packet
2906  */
2907 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
2908 {
2909 	struct htt_soc *soc = (struct htt_soc *) context;
2910 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
2911 	u_int32_t *msg_word;
2912 	enum htt_t2h_msg_type msg_type;
2913 	bool free_buf = true;
2914 
2915 	/* check for successful message reception */
2916 	if (pkt->Status != QDF_STATUS_SUCCESS) {
2917 		if (pkt->Status != QDF_STATUS_E_CANCELED)
2918 			soc->stats.htc_err_cnt++;
2919 
2920 		qdf_nbuf_free(htt_t2h_msg);
2921 		return;
2922 	}
2923 
2924 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
2925 
2926 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
2927 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
2928 	switch (msg_type) {
2929 	case HTT_T2H_MSG_TYPE_PEER_MAP:
2930 		{
2931 			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
2932 			u_int8_t *peer_mac_addr;
2933 			u_int16_t peer_id;
2934 			u_int16_t hw_peer_id;
2935 			u_int8_t vdev_id;
2936 			u_int8_t is_wds;
2937 			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
2938 
2939 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
2940 			hw_peer_id =
2941 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
2942 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
2943 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
2944 				(u_int8_t *) (msg_word+1),
2945 				&mac_addr_deswizzle_buf[0]);
2946 			QDF_TRACE(QDF_MODULE_ID_TXRX,
2947 				QDF_TRACE_LEVEL_INFO,
2948 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
2949 				peer_id, vdev_id);
2950 
2951 			/*
2952 			 * check if peer already exists for this peer_id, if so
2953 			 * this peer map event is in response for a wds peer add
2954 			 * wmi command sent during wds source port learning.
2955 			 * in this case just add the ast entry to the existing
2956 			 * peer ast_list.
2957 			 */
2958 			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
2959 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
2960 					       vdev_id, peer_mac_addr, 0,
2961 					       is_wds);
2962 			break;
2963 		}
2964 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
2965 		{
2966 			u_int16_t peer_id;
2967 			u_int8_t vdev_id;
2968 			u_int8_t mac_addr[HTT_MAC_ADDR_LEN] = {0};
2969 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
2970 			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
2971 
2972 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
2973 						 vdev_id, mac_addr, 0);
2974 			break;
2975 		}
2976 	case HTT_T2H_MSG_TYPE_SEC_IND:
2977 		{
2978 			u_int16_t peer_id;
2979 			enum cdp_sec_type sec_type;
2980 			int is_unicast;
2981 
2982 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
2983 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
2984 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
2985 			/* point to the first part of the Michael key */
2986 			msg_word++;
2987 			dp_rx_sec_ind_handler(
2988 				soc->dp_soc, peer_id, sec_type, is_unicast,
2989 				msg_word, msg_word + 2);
2990 			break;
2991 		}
2992 
2993 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
2994 		{
2995 			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
2996 							     htt_t2h_msg);
2997 			break;
2998 		}
2999 
3000 	case HTT_T2H_MSG_TYPE_PKTLOG:
3001 		{
3002 			dp_pktlog_msg_handler(soc, msg_word);
3003 			break;
3004 		}
3005 
3006 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
3007 		{
3008 			htc_pm_runtime_put(soc->htc_soc);
3009 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
3010 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
3011 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
3012 				"target uses HTT version %d.%d; host uses %d.%d",
3013 				soc->tgt_ver.major, soc->tgt_ver.minor,
3014 				HTT_CURRENT_VERSION_MAJOR,
3015 				HTT_CURRENT_VERSION_MINOR);
3016 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
3017 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3018 					QDF_TRACE_LEVEL_ERROR,
3019 					"*** Incompatible host/target HTT versions!");
3020 			}
3021 			/* abort if the target is incompatible with the host */
3022 			qdf_assert(soc->tgt_ver.major ==
3023 				HTT_CURRENT_VERSION_MAJOR);
3024 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
3025 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3026 					QDF_TRACE_LEVEL_WARN,
3027 					"*** Warning: host/target HTT versions"
3028 					" are different, though compatible!");
3029 			}
3030 			break;
3031 		}
3032 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
3033 		{
3034 			uint16_t peer_id;
3035 			uint8_t tid;
3036 			uint8_t win_sz;
3037 			uint16_t status;
3038 			struct dp_peer *peer;
3039 
3040 			/*
3041 			 * Update REO Queue Desc with new values
3042 			 */
3043 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
3044 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
3045 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
3046 			peer = dp_peer_find_by_id(soc->dp_soc, peer_id);
3047 
3048 			/*
3049 			 * Window size needs to be incremented by 1
3050 			 * since fw needs to represent a value of 256
3051 			 * using just 8 bits
3052 			 */
3053 			if (peer) {
3054 				status = dp_addba_requestprocess_wifi3(peer,
3055 						0, tid, 0, win_sz + 1, 0xffff);
3056 
3057 				/*
3058 				 * If PEER_LOCK_REF_PROTECT enbled dec ref
3059 				 * which is inc by dp_peer_find_by_id
3060 				 */
3061 				dp_peer_unref_del_find_by_id(peer);
3062 
3063 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3064 					QDF_TRACE_LEVEL_INFO,
3065 					FL("PeerID %d BAW %d TID %d stat %d"),
3066 					peer_id, win_sz, tid, status);
3067 
3068 			} else {
3069 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3070 					QDF_TRACE_LEVEL_ERROR,
3071 					FL("Peer not found peer id %d"),
3072 					peer_id);
3073 			}
3074 			break;
3075 		}
3076 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
3077 		{
3078 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
3079 			break;
3080 		}
3081 	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
3082 		{
3083 			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
3084 			u_int8_t *peer_mac_addr;
3085 			u_int16_t peer_id;
3086 			u_int16_t hw_peer_id;
3087 			u_int8_t vdev_id;
3088 			bool is_wds;
3089 			u_int16_t ast_hash;
3090 
3091 			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
3092 			hw_peer_id =
3093 			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
3094 			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
3095 			peer_mac_addr =
3096 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3097 						   &mac_addr_deswizzle_buf[0]);
3098 			is_wds =
3099 			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
3100 			ast_hash =
3101 			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
3102 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3103 				  QDF_TRACE_LEVEL_INFO,
3104 				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3105 				  peer_id, vdev_id);
3106 
3107 			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
3108 					       hw_peer_id, vdev_id,
3109 					       peer_mac_addr, ast_hash,
3110 					       is_wds);
3111 			break;
3112 		}
3113 	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
3114 		{
3115 			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
3116 			u_int8_t *mac_addr;
3117 			u_int16_t peer_id;
3118 			u_int8_t vdev_id;
3119 			u_int8_t is_wds;
3120 
3121 			peer_id =
3122 			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
3123 			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
3124 			mac_addr =
3125 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3126 						   &mac_addr_deswizzle_buf[0]);
3127 			is_wds =
3128 			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
3129 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3130 				  QDF_TRACE_LEVEL_INFO,
3131 				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3132 				  peer_id, vdev_id);
3133 
3134 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3135 						 vdev_id, mac_addr,
3136 						 is_wds);
3137 			break;
3138 		}
3139 	default:
3140 		break;
3141 	};
3142 
3143 	/* Free the indication buffer */
3144 	if (free_buf)
3145 		qdf_nbuf_free(htt_t2h_msg);
3146 }
3147 
3148 /*
3149  * dp_htt_h2t_full() - Send full handler (called from HTC)
3150  * @context:	Opaque context (HTT SOC handle)
3151  * @pkt:	HTC packet
3152  *
3153  * Return: enum htc_send_full_action
3154  */
static enum htc_send_full_action
dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
{
	/* Never drop H2T packets on send-queue overflow; keep queueing */
	return HTC_SEND_FULL_KEEP;
}
3160 
3161 /*
3162  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
3163  * @context:	Opaque context (HTT SOC handle)
3164  * @nbuf:	nbuf containing T2H message
3165  * @pipe_id:	HIF pipe ID
3166  *
3167  * Return: QDF_STATUS
3168  *
3169  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3170  * will be used for packet log and other high-priority HTT messages. Proper
3171  * HTC connection to be added later once required FW changes are available
3172  */
3173 static QDF_STATUS
3174 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3175 {
3176 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
3177 	HTC_PACKET htc_pkt;
3178 
3179 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3180 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3181 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3182 	htc_pkt.pPktContext = (void *)nbuf;
3183 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3184 
3185 	return rc;
3186 }
3187 
3188 /*
3189  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
3190  * @htt_soc:	HTT SOC handle
3191  *
3192  * Return: 0 on success; error code on failure
3193  */
3194 static int
3195 htt_htc_soc_attach(struct htt_soc *soc)
3196 {
3197 	struct htc_service_connect_req connect;
3198 	struct htc_service_connect_resp response;
3199 	A_STATUS status;
3200 	struct dp_soc *dpsoc = soc->dp_soc;
3201 
3202 	qdf_mem_set(&connect, sizeof(connect), 0);
3203 	qdf_mem_set(&response, sizeof(response), 0);
3204 
3205 	connect.pMetaData = NULL;
3206 	connect.MetaDataLength = 0;
3207 	connect.EpCallbacks.pContext = soc;
3208 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
3209 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3210 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
3211 
3212 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
3213 	connect.EpCallbacks.EpRecvRefill = NULL;
3214 
3215 	/* N/A, fill is done by HIF */
3216 	connect.EpCallbacks.RecvRefillWaterMark = 1;
3217 
3218 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
3219 	/*
3220 	 * Specify how deep to let a queue get before htc_send_pkt will
3221 	 * call the EpSendFull function due to excessive send queue depth.
3222 	 */
3223 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
3224 
3225 	/* disable flow control for HTT data message service */
3226 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
3227 
3228 	/* connect to control service */
3229 	connect.service_id = HTT_DATA_MSG_SVC;
3230 
3231 	status = htc_connect_service(soc->htc_soc, &connect, &response);
3232 
3233 	if (status != A_OK)
3234 		return QDF_STATUS_E_FAILURE;
3235 
3236 	soc->htc_endpoint = response.Endpoint;
3237 
3238 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
3239 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
3240 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
3241 
3242 	return 0; /* success */
3243 }
3244 
3245 /*
3246  * htt_soc_initialize() - SOC level HTT initialization
3247  * @htt_soc: Opaque htt SOC handle
3248  * @ctrl_psoc: Opaque ctrl SOC handle
3249  * @htc_soc: SOC level HTC handle
3250  * @hal_soc: Opaque HAL SOC handle
3251  * @osdev: QDF device
3252  *
3253  * Return: HTT handle on success; NULL on failure
3254  */
3255 void *
3256 htt_soc_initialize(void *htt_soc, void *ctrl_psoc, HTC_HANDLE htc_soc,
3257 		   void *hal_soc, qdf_device_t osdev)
3258 {
3259 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3260 
3261 	soc->osdev = osdev;
3262 	soc->ctrl_psoc = ctrl_psoc;
3263 	soc->htc_soc = htc_soc;
3264 	soc->hal_soc = hal_soc;
3265 
3266 	if (htt_htc_soc_attach(soc))
3267 		goto fail2;
3268 
3269 	return soc;
3270 
3271 fail2:
3272 	return NULL;
3273 }
3274 
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	/* Free the misc HTC packet list and the preallocated packet pool */
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}
3280 
3281 /*
3282  * htt_soc_htc_prealloc() - HTC memory prealloc
3283  * @htt_soc: SOC level HTT handle
3284  *
3285  * Return: QDF_STATUS_SUCCESS on Success or
3286  * QDF_STATUS_E_NOMEM on allocation failure
3287  */
3288 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
3289 {
3290 	int i;
3291 
3292 	HTT_TX_MUTEX_INIT(&soc->htt_tx_mutex);
3293 
3294 	soc->htt_htc_pkt_freelist = NULL;
3295 	/* pre-allocate some HTC_PACKET objects */
3296 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3297 		struct dp_htt_htc_pkt_union *pkt;
3298 		pkt = qdf_mem_malloc(sizeof(*pkt));
3299 		if (!pkt)
3300 			return QDF_STATUS_E_NOMEM;
3301 
3302 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3303 	}
3304 	return QDF_STATUS_SUCCESS;
3305 }
3306 
3307 /*
3308  * htt_soc_detach() - Free SOC level HTT handle
3309  * @htt_hdl: HTT SOC handle
3310  */
void htt_soc_detach(void *htt_hdl)
{
	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;

	/* Destroy the TX mutex created in htt_soc_htc_prealloc() */
	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
	qdf_mem_free(htt_handle);
}
3318 
3319 /**
 * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW
3321  * @pdev: DP PDEV handle
3322  * @stats_type_upload_mask: stats type requested by user
3323  * @config_param_0: extra configuration parameters
3324  * @config_param_1: extra configuration parameters
3325  * @config_param_2: extra configuration parameters
3326  * @config_param_3: extra configuration parameters
3327  * @mac_id: mac number
3328  *
3329  * return: QDF STATUS
3330  */
3331 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
3332 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
3333 		uint32_t config_param_1, uint32_t config_param_2,
3334 		uint32_t config_param_3, int cookie_val, int cookie_msb,
3335 		uint8_t mac_id)
3336 {
3337 	struct htt_soc *soc = pdev->soc->htt_handle;
3338 	struct dp_htt_htc_pkt *pkt;
3339 	qdf_nbuf_t msg;
3340 	uint32_t *msg_word;
3341 	uint8_t pdev_mask = 0;
3342 
3343 	msg = qdf_nbuf_alloc(
3344 			soc->osdev,
3345 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
3346 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
3347 
3348 	if (!msg)
3349 		return QDF_STATUS_E_NOMEM;
3350 
3351 	/*TODO:Add support for SOC stats
3352 	 * Bit 0: SOC Stats
3353 	 * Bit 1: Pdev stats for pdev id 0
3354 	 * Bit 2: Pdev stats for pdev id 1
3355 	 * Bit 3: Pdev stats for pdev id 2
3356 	 */
3357 	mac_id = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
3358 
3359 	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
3360 	/*
3361 	 * Set the length of the message.
3362 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3363 	 * separately during the below call to qdf_nbuf_push_head.
3364 	 * The contribution from the HTC header is added separately inside HTC.
3365 	 */
3366 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
3367 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3368 				"Failed to expand head for HTT_EXT_STATS");
3369 		qdf_nbuf_free(msg);
3370 		return QDF_STATUS_E_FAILURE;
3371 	}
3372 
3373 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3374 		"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
3375 		"config_param_1 %u\n config_param_2 %u\n"
3376 		"config_param_4 %u\n -------------",
3377 		__func__, __LINE__, cookie_val, config_param_0,
3378 		config_param_1, config_param_2,	config_param_3);
3379 
3380 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3381 
3382 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3383 	*msg_word = 0;
3384 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
3385 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
3386 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
3387 
3388 	/* word 1 */
3389 	msg_word++;
3390 	*msg_word = 0;
3391 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
3392 
3393 	/* word 2 */
3394 	msg_word++;
3395 	*msg_word = 0;
3396 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
3397 
3398 	/* word 3 */
3399 	msg_word++;
3400 	*msg_word = 0;
3401 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
3402 
3403 	/* word 4 */
3404 	msg_word++;
3405 	*msg_word = 0;
3406 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
3407 
3408 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
3409 
3410 	/* word 5 */
3411 	msg_word++;
3412 
3413 	/* word 6 */
3414 	msg_word++;
3415 	*msg_word = 0;
3416 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
3417 
3418 	/* word 7 */
3419 	msg_word++;
3420 	*msg_word = 0;
3421 	/*Using last 2 bits for pdev_id */
3422 	cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
3423 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
3424 
3425 	pkt = htt_htc_pkt_alloc(soc);
3426 	if (!pkt) {
3427 		qdf_nbuf_free(msg);
3428 		return QDF_STATUS_E_NOMEM;
3429 	}
3430 
3431 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3432 
3433 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
3434 			dp_htt_h2t_send_complete_free_netbuf,
3435 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
3436 			soc->htc_endpoint,
3437 			1); /* tag - not relevant here */
3438 
3439 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3440 	DP_HTT_SEND_HTC_PKT(soc, pkt);
3441 	return 0;
3442 }
3443 
/* This guard will be removed once the HTT header (htt.h) properly defines
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG.
 */
3447 #if defined(WDI_EVENT_ENABLE)
3448 /**
3449  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
3450  * @pdev: DP PDEV handle
3451  * @stats_type_upload_mask: stats type requested by user
3452  * @mac_id: Mac id number
3453  *
3454  * return: QDF STATUS
3455  */
3456 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
3457 		uint32_t stats_type_upload_mask, uint8_t mac_id)
3458 {
3459 	struct htt_soc *soc = pdev->soc->htt_handle;
3460 	struct dp_htt_htc_pkt *pkt;
3461 	qdf_nbuf_t msg;
3462 	uint32_t *msg_word;
3463 	uint8_t pdev_mask;
3464 
3465 	msg = qdf_nbuf_alloc(
3466 			soc->osdev,
3467 			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
3468 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
3469 
3470 	if (!msg) {
3471 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3472 		"Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer");
3473 		qdf_assert(0);
3474 		return QDF_STATUS_E_NOMEM;
3475 	}
3476 
3477 	/*TODO:Add support for SOC stats
3478 	 * Bit 0: SOC Stats
3479 	 * Bit 1: Pdev stats for pdev id 0
3480 	 * Bit 2: Pdev stats for pdev id 1
3481 	 * Bit 3: Pdev stats for pdev id 2
3482 	 */
3483 	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
3484 
3485 	/*
3486 	 * Set the length of the message.
3487 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3488 	 * separately during the below call to qdf_nbuf_push_head.
3489 	 * The contribution from the HTC header is added separately inside HTC.
3490 	 */
3491 	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
3492 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3493 				"Failed to expand head for HTT_CFG_STATS");
3494 		qdf_nbuf_free(msg);
3495 		return QDF_STATUS_E_FAILURE;
3496 	}
3497 
3498 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3499 
3500 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3501 	*msg_word = 0;
3502 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
3503 	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
3504 	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
3505 			stats_type_upload_mask);
3506 
3507 	pkt = htt_htc_pkt_alloc(soc);
3508 	if (!pkt) {
3509 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3510 				"Fail to allocate dp_htt_htc_pkt buffer");
3511 		qdf_assert(0);
3512 		qdf_nbuf_free(msg);
3513 		return QDF_STATUS_E_NOMEM;
3514 	}
3515 
3516 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3517 
3518 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
3519 			dp_htt_h2t_send_complete_free_netbuf,
3520 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
3521 			soc->htc_endpoint,
3522 			1); /* tag - not relevant here */
3523 
3524 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3525 	DP_HTT_SEND_HTC_PKT(soc, pkt);
3526 	return 0;
3527 }
3528 #endif
3529