xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision 302a1d9701784af5f4797b1a9fe07ae820b51907)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <htt.h>
20 #include <hal_hw_headers.h>
21 #include <hal_api.h>
22 #include "dp_htt.h"
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_rx_mon.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
30 #include "cdp_txrx_cmn_struct.h"
31 
32 #ifdef FEATURE_PERPKT_INFO
33 #include "dp_ratetable.h"
34 #endif
35 
#define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE

#define HTT_HTC_PKT_POOL_INIT_SIZE 64
#define HTT_T2H_MAX_MSG_SIZE 2048

/* total buffer size for an HTT message: payload + HTC header + alignment pad */
#define HTT_MSG_BUF_SIZE(msg_bytes) \
	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)

#define HTT_PID_BIT_MASK 0x3

#define DP_EXT_MSG_LENGTH 2048

/*
 * DP_HTT_SEND_HTC_PKT() - hand a packet to HTC; on successful send the
 * packet is tracked on the misc list so it can be reclaimed later.
 */
#define DP_HTT_SEND_HTC_PKT(soc, pkt)                            \
do {                                                             \
	if (htc_send_pkt(soc->htc_soc, &pkt->htc_pkt) ==         \
					QDF_STATUS_SUCCESS)      \
		htt_htc_misc_pkt_list_add(soc, pkt);             \
} while (0)

#define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16

/**
 * Bitmap of HTT PPDU TLV types for Default mode
 *
 * The expansion is fully parenthesized so the macro is safe inside larger
 * expressions: without the outer parens, 'x & BITMAP' would bind as
 * '(x & first_bit) | other_bits' because '&' has higher precedence than '|'.
 */
#define HTT_PPDU_DEFAULT_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV))

/**
 * Bitmap of HTT PPDU TLV types for Sniffer mode
 *
 * Fully parenthesized for the same precedence reason as above.
 */
#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP \
	((1 << HTT_PPDU_STATS_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \
	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV))

#define HTT_FRAMECTRL_DATATYPE 0x08
#define HTT_PPDU_DESC_MAX_DEPTH 16
#define DP_SCAN_PEER_ID 0xFFFF
84 
85 /*
86  * dp_tx_stats_update() - Update per-peer statistics
87  * @soc: Datapath soc handle
88  * @peer: Datapath peer handle
89  * @ppdu: PPDU Descriptor
90  * @ack_rssi: RSSI of last ack received
91  *
92  * Return: None
93  */
94 #ifdef FEATURE_PERPKT_INFO
95 static inline void
96 dp_tx_rate_stats_update(struct dp_peer *peer,
97 			struct cdp_tx_completion_ppdu_user *ppdu)
98 {
99 	uint32_t ratekbps = 0;
100 	uint32_t ppdu_tx_rate = 0;
101 
102 	if (!peer || !ppdu)
103 		return;
104 
105 	dp_peer_stats_notify(peer);
106 
107 	ratekbps = dp_getrateindex(ppdu->gi,
108 				   ppdu->mcs,
109 				   ppdu->nss,
110 				   ppdu->preamble,
111 				   ppdu->bw);
112 
113 	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);
114 
115 	if (!ratekbps)
116 		return;
117 
118 	dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
119 	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
120 	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
121 
122 	if (peer->vdev) {
123 		peer->vdev->stats.tx.last_tx_rate = ratekbps;
124 		peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
125 	}
126 }
127 
/* Accumulate one PPDU's per-user completion info into the peer's tx stats.
 * Skipped entirely when per-MSDU completion processing already updates
 * these counters (soc->process_tx_status).
 */
static void dp_tx_stats_update(struct dp_soc *soc, struct dp_peer *peer,
		struct cdp_tx_completion_ppdu_user *ppdu, uint32_t ack_rssi)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	uint8_t preamble, mcs;
	uint16_t num_msdu;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;
	num_msdu = ppdu->num_msdu;

	/* If the peer statistics are already processed as part of
	 * per-MSDU completion handler, do not process these again in per-PPDU
	 * indications */
	if (soc->process_tx_status)
		return;

	/* completed packet/byte counters (bytes include retries+failures) */
	DP_STATS_INC_PKT(peer, tx.comp_pkt,
			num_msdu, (ppdu->success_bytes +
				ppdu->retry_bytes + ppdu->failed_bytes));
	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
	/* bucketed counters keyed by PPDU attributes */
	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], num_msdu);
	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
	/* ack RSSI is only meaningful for unicast PPDUs with a valid sample */
	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);

	DP_STATS_INC(peer, tx.retries,
			(ppdu->long_retries + ppdu->short_retries));
	/* Per-preamble MCS histogram: for each preamble type, an in-range
	 * mcs increments its own bucket, an out-of-range mcs is folded into
	 * the last bucket (MAX_MCS-1). Exactly one INCC of each pair fires.
	 */
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));

	dp_tx_rate_stats_update(peer, ppdu);

	/* last PER as a percentage; guarded against divide-by-zero */
	if (peer->stats.tx.ucast.num)
		peer->stats.tx.last_per = ((peer->stats.tx.ucast.num -
					peer->stats.tx.tx_success.num) * 100) /
					peer->stats.tx.ucast.num;

	/* push the refreshed stats up to the control-plane layer, if hooked */
	if (soc->cdp_soc.ol_ops->update_dp_stats) {
		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
				&peer->stats, ppdu->peer_id,
				UPDATE_PEER_STATS);

	}
}
206 #endif
207 
208 /*
209  * htt_htc_pkt_alloc() - Allocate HTC packet buffer
210  * @htt_soc:	HTT SOC handle
211  *
212  * Return: Pointer to htc packet buffer
213  */
214 static struct dp_htt_htc_pkt *
215 htt_htc_pkt_alloc(struct htt_soc *soc)
216 {
217 	struct dp_htt_htc_pkt_union *pkt = NULL;
218 
219 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
220 	if (soc->htt_htc_pkt_freelist) {
221 		pkt = soc->htt_htc_pkt_freelist;
222 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
223 	}
224 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
225 
226 	if (pkt == NULL)
227 		pkt = qdf_mem_malloc(sizeof(*pkt));
228 	return &pkt->u.pkt; /* not actually a dereference */
229 }
230 
231 /*
232  * htt_htc_pkt_free() - Free HTC packet buffer
233  * @htt_soc:	HTT SOC handle
234  */
235 static void
236 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
237 {
238 	struct dp_htt_htc_pkt_union *u_pkt =
239 		(struct dp_htt_htc_pkt_union *)pkt;
240 
241 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
242 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
243 	soc->htt_htc_pkt_freelist = u_pkt;
244 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
245 }
246 
247 /*
248  * htt_htc_pkt_pool_free() - Free HTC packet pool
249  * @htt_soc:	HTT SOC handle
250  */
251 static void
252 htt_htc_pkt_pool_free(struct htt_soc *soc)
253 {
254 	struct dp_htt_htc_pkt_union *pkt, *next;
255 	pkt = soc->htt_htc_pkt_freelist;
256 	while (pkt) {
257 		next = pkt->u.next;
258 		qdf_mem_free(pkt);
259 		pkt = next;
260 	}
261 	soc->htt_htc_pkt_freelist = NULL;
262 }
263 
264 /*
265  * htt_htc_misc_pkt_list_trim() - trim misc list
266  * @htt_soc: HTT SOC handle
267  * @level: max no. of pkts in list
268  */
269 static void
270 htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
271 {
272 	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
273 	int i = 0;
274 	qdf_nbuf_t netbuf;
275 
276 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
277 	pkt = soc->htt_htc_pkt_misclist;
278 	while (pkt) {
279 		next = pkt->u.next;
280 		/* trim the out grown list*/
281 		if (++i > level) {
282 			netbuf =
283 				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
284 			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
285 			qdf_nbuf_free(netbuf);
286 			qdf_mem_free(pkt);
287 			pkt = NULL;
288 			if (prev)
289 				prev->u.next = NULL;
290 		}
291 		prev = pkt;
292 		pkt = next;
293 	}
294 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
295 }
296 
297 /*
298  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
299  * @htt_soc:	HTT SOC handle
300  * @dp_htt_htc_pkt: pkt to be added to list
301  */
302 static void
303 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
304 {
305 	struct dp_htt_htc_pkt_union *u_pkt =
306 				(struct dp_htt_htc_pkt_union *)pkt;
307 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
308 							pkt->htc_pkt.Endpoint)
309 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
310 
311 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
312 	if (soc->htt_htc_pkt_misclist) {
313 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
314 		soc->htt_htc_pkt_misclist = u_pkt;
315 	} else {
316 		soc->htt_htc_pkt_misclist = u_pkt;
317 	}
318 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
319 
320 	/* only ce pipe size + tx_queue_depth could possibly be in use
321 	 * free older packets in the misclist
322 	 */
323 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
324 }
325 
326 /*
327  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
328  * @htt_soc:	HTT SOC handle
329  */
330 static void
331 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
332 {
333 	struct dp_htt_htc_pkt_union *pkt, *next;
334 	qdf_nbuf_t netbuf;
335 
336 	pkt = soc->htt_htc_pkt_misclist;
337 
338 	while (pkt) {
339 		next = pkt->u.next;
340 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
341 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
342 
343 		soc->stats.htc_pkt_free++;
344 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
345 			 "%s: Pkt free count %d",
346 			 __func__, soc->stats.htc_pkt_free);
347 
348 		qdf_nbuf_free(netbuf);
349 		qdf_mem_free(pkt);
350 		pkt = next;
351 	}
352 	soc->htt_htc_pkt_misclist = NULL;
353 }
354 
355 /*
356  * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
357  * @tgt_mac_addr:	Target MAC
358  * @buffer:		Output buffer
359  */
360 static u_int8_t *
361 htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
362 {
363 #ifdef BIG_ENDIAN_HOST
364 	/*
365 	 * The host endianness is opposite of the target endianness.
366 	 * To make u_int32_t elements come out correctly, the target->host
367 	 * upload has swizzled the bytes in each u_int32_t element of the
368 	 * message.
369 	 * For byte-array message fields like the MAC address, this
370 	 * upload swizzling puts the bytes in the wrong order, and needs
371 	 * to be undone.
372 	 */
373 	buffer[0] = tgt_mac_addr[3];
374 	buffer[1] = tgt_mac_addr[2];
375 	buffer[2] = tgt_mac_addr[1];
376 	buffer[3] = tgt_mac_addr[0];
377 	buffer[4] = tgt_mac_addr[7];
378 	buffer[5] = tgt_mac_addr[6];
379 	return buffer;
380 #else
381 	/*
382 	 * The host endianness matches the target endianness -
383 	 * we can use the mac addr directly from the message buffer.
384 	 */
385 	return tgt_mac_addr;
386 #endif
387 }
388 
389 /*
390  * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
391  * @soc:	SOC handle
392  * @status:	Completion status
393  * @netbuf:	HTT buffer
394  */
395 static void
396 dp_htt_h2t_send_complete_free_netbuf(
397 	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
398 {
399 	qdf_nbuf_free(netbuf);
400 }
401 
402 /*
403  * dp_htt_h2t_send_complete() - H2T completion handler
404  * @context:	Opaque context (HTT SOC handle)
405  * @htc_pkt:	HTC packet
406  */
407 static void
408 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
409 {
410 	void (*send_complete_part2)(
411 		void *soc, A_STATUS status, qdf_nbuf_t msdu);
412 	struct htt_soc *soc =  (struct htt_soc *) context;
413 	struct dp_htt_htc_pkt *htt_pkt;
414 	qdf_nbuf_t netbuf;
415 
416 	send_complete_part2 = htc_pkt->pPktContext;
417 
418 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
419 
420 	/* process (free or keep) the netbuf that held the message */
421 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
422 	/*
423 	 * adf sendcomplete is required for windows only
424 	 */
425 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
426 	if (send_complete_part2 != NULL) {
427 		send_complete_part2(
428 			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
429 	}
430 	/* free the htt_htc_pkt / HTC_PACKET object */
431 	htt_htc_pkt_free(soc, htt_pkt);
432 }
433 
434 /*
435  * htt_h2t_ver_req_msg() - Send HTT version request message to target
436  * @htt_soc:	HTT SOC handle
437  *
438  * Return: 0 on success; error code on failure
439  */
440 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
441 {
442 	struct dp_htt_htc_pkt *pkt;
443 	qdf_nbuf_t msg;
444 	uint32_t *msg_word;
445 
446 	msg = qdf_nbuf_alloc(
447 		soc->osdev,
448 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
449 		/* reserve room for the HTC header */
450 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
451 	if (!msg)
452 		return QDF_STATUS_E_NOMEM;
453 
454 	/*
455 	 * Set the length of the message.
456 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
457 	 * separately during the below call to qdf_nbuf_push_head.
458 	 * The contribution from the HTC header is added separately inside HTC.
459 	 */
460 	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
461 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
462 			"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
463 			__func__);
464 		return QDF_STATUS_E_FAILURE;
465 	}
466 
467 	/* fill in the message contents */
468 	msg_word = (u_int32_t *) qdf_nbuf_data(msg);
469 
470 	/* rewind beyond alignment pad to get to the HTC header reserved area */
471 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
472 
473 	*msg_word = 0;
474 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
475 
476 	pkt = htt_htc_pkt_alloc(soc);
477 	if (!pkt) {
478 		qdf_nbuf_free(msg);
479 		return QDF_STATUS_E_FAILURE;
480 	}
481 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
482 
483 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
484 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
485 		qdf_nbuf_len(msg), soc->htc_endpoint,
486 		1); /* tag - not relevant here */
487 
488 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
489 	DP_HTT_SEND_HTC_PKT(soc, pkt);
490 	return 0;
491 }
492 
493 /*
494  * htt_srng_setup() - Send SRNG setup message to target
495  * @htt_soc:	HTT SOC handle
496  * @mac_id:	MAC Id
497  * @hal_srng:	Opaque HAL SRNG pointer
498  * @hal_ring_type:	SRNG ring type
499  *
500  * Return: 0 on success; error code on failure
501  */
502 int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,
503 	int hal_ring_type)
504 {
505 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
506 	struct dp_htt_htc_pkt *pkt;
507 	qdf_nbuf_t htt_msg;
508 	uint32_t *msg_word;
509 	struct hal_srng_params srng_params;
510 	qdf_dma_addr_t hp_addr, tp_addr;
511 	uint32_t ring_entry_size =
512 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
513 	int htt_ring_type, htt_ring_id;
514 
515 	/* Sizes should be set in 4-byte words */
516 	ring_entry_size = ring_entry_size >> 2;
517 
518 	htt_msg = qdf_nbuf_alloc(soc->osdev,
519 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
520 		/* reserve room for the HTC header */
521 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
522 	if (!htt_msg)
523 		goto fail0;
524 
525 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
526 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_srng);
527 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_srng);
528 
529 	switch (hal_ring_type) {
530 	case RXDMA_BUF:
531 #ifdef QCA_HOST2FW_RXBUF_RING
532 		if (srng_params.ring_id ==
533 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
534 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
535 			htt_ring_type = HTT_SW_TO_SW_RING;
536 #ifdef IPA_OFFLOAD
537 		} else if (srng_params.ring_id ==
538 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
539 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
540 			htt_ring_type = HTT_SW_TO_SW_RING;
541 #endif
542 #else
543 		if (srng_params.ring_id ==
544 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
545 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
546 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
547 			htt_ring_type = HTT_SW_TO_HW_RING;
548 #endif
549 		} else if (srng_params.ring_id ==
550 #ifdef IPA_OFFLOAD
551 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
552 #else
553 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
554 #endif
555 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
556 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
557 			htt_ring_type = HTT_SW_TO_HW_RING;
558 		} else {
559 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
560 				   "%s: Ring %d currently not supported",
561 				   __func__, srng_params.ring_id);
562 			goto fail1;
563 		}
564 
565 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
566 			  "%s: ring_type %d ring_id %d",
567 			  __func__, hal_ring_type, srng_params.ring_id);
568 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
569 			  "%s: hp_addr 0x%llx tp_addr 0x%llx",
570 			  __func__, (uint64_t)hp_addr, (uint64_t)tp_addr);
571 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
572 			  "%s: htt_ring_id %d", __func__, htt_ring_id);
573 		break;
574 	case RXDMA_MONITOR_BUF:
575 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
576 		htt_ring_type = HTT_SW_TO_HW_RING;
577 		break;
578 	case RXDMA_MONITOR_STATUS:
579 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
580 		htt_ring_type = HTT_SW_TO_HW_RING;
581 		break;
582 	case RXDMA_MONITOR_DST:
583 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
584 		htt_ring_type = HTT_HW_TO_SW_RING;
585 		break;
586 	case RXDMA_MONITOR_DESC:
587 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
588 		htt_ring_type = HTT_SW_TO_HW_RING;
589 		break;
590 	case RXDMA_DST:
591 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
592 		htt_ring_type = HTT_HW_TO_SW_RING;
593 		break;
594 
595 	default:
596 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
597 			"%s: Ring currently not supported", __func__);
598 			goto fail1;
599 	}
600 
601 	/*
602 	 * Set the length of the message.
603 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
604 	 * separately during the below call to qdf_nbuf_push_head.
605 	 * The contribution from the HTC header is added separately inside HTC.
606 	 */
607 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
608 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
609 			"%s: Failed to expand head for SRING_SETUP msg",
610 			__func__);
611 		return QDF_STATUS_E_FAILURE;
612 	}
613 
614 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
615 
616 	/* rewind beyond alignment pad to get to the HTC header reserved area */
617 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
618 
619 	/* word 0 */
620 	*msg_word = 0;
621 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
622 
623 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
624 			(htt_ring_type == HTT_HW_TO_SW_RING))
625 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word,
626 			 DP_SW2HW_MACID(mac_id));
627 	else
628 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
629 
630 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
631 		  "%s: mac_id %d", __func__, mac_id);
632 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
633 	/* TODO: Discuss with FW on changing this to unique ID and using
634 	 * htt_ring_type to send the type of ring
635 	 */
636 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
637 
638 	/* word 1 */
639 	msg_word++;
640 	*msg_word = 0;
641 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
642 		srng_params.ring_base_paddr & 0xffffffff);
643 
644 	/* word 2 */
645 	msg_word++;
646 	*msg_word = 0;
647 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
648 		(uint64_t)srng_params.ring_base_paddr >> 32);
649 
650 	/* word 3 */
651 	msg_word++;
652 	*msg_word = 0;
653 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
654 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
655 		(ring_entry_size * srng_params.num_entries));
656 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
657 		  "%s: entry_size %d", __func__,
658 			 ring_entry_size);
659 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
660 		  "%s: num_entries %d", __func__,
661 			 srng_params.num_entries);
662 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
663 		  "%s: ring_size %d", __func__,
664 			 (ring_entry_size * srng_params.num_entries));
665 	if (htt_ring_type == HTT_SW_TO_HW_RING)
666 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
667 						*msg_word, 1);
668 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
669 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
670 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
671 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
672 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
673 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
674 
675 	/* word 4 */
676 	msg_word++;
677 	*msg_word = 0;
678 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
679 		hp_addr & 0xffffffff);
680 
681 	/* word 5 */
682 	msg_word++;
683 	*msg_word = 0;
684 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
685 		(uint64_t)hp_addr >> 32);
686 
687 	/* word 6 */
688 	msg_word++;
689 	*msg_word = 0;
690 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
691 		tp_addr & 0xffffffff);
692 
693 	/* word 7 */
694 	msg_word++;
695 	*msg_word = 0;
696 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
697 		(uint64_t)tp_addr >> 32);
698 
699 	/* word 8 */
700 	msg_word++;
701 	*msg_word = 0;
702 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
703 		srng_params.msi_addr & 0xffffffff);
704 
705 	/* word 9 */
706 	msg_word++;
707 	*msg_word = 0;
708 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
709 		(uint64_t)(srng_params.msi_addr) >> 32);
710 
711 	/* word 10 */
712 	msg_word++;
713 	*msg_word = 0;
714 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
715 		srng_params.msi_data);
716 
717 	/* word 11 */
718 	msg_word++;
719 	*msg_word = 0;
720 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
721 		srng_params.intr_batch_cntr_thres_entries *
722 		ring_entry_size);
723 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
724 		srng_params.intr_timer_thres_us >> 3);
725 
726 	/* word 12 */
727 	msg_word++;
728 	*msg_word = 0;
729 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
730 		/* TODO: Setting low threshold to 1/8th of ring size - see
731 		 * if this needs to be configurable
732 		 */
733 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
734 			srng_params.low_threshold);
735 	}
736 	/* "response_required" field should be set if a HTT response message is
737 	 * required after setting up the ring.
738 	 */
739 	pkt = htt_htc_pkt_alloc(soc);
740 	if (!pkt)
741 		goto fail1;
742 
743 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
744 
745 	SET_HTC_PACKET_INFO_TX(
746 		&pkt->htc_pkt,
747 		dp_htt_h2t_send_complete_free_netbuf,
748 		qdf_nbuf_data(htt_msg),
749 		qdf_nbuf_len(htt_msg),
750 		soc->htc_endpoint,
751 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
752 
753 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
754 	DP_HTT_SEND_HTC_PKT(soc, pkt);
755 
756 	return QDF_STATUS_SUCCESS;
757 
758 fail1:
759 	qdf_nbuf_free(htt_msg);
760 fail0:
761 	return QDF_STATUS_E_FAILURE;
762 }
763 
764 /*
765  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
766  * config message to target
767  * @htt_soc:	HTT SOC handle
768  * @pdev_id:	PDEV Id
769  * @hal_srng:	Opaque HAL SRNG pointer
770  * @hal_ring_type:	SRNG ring type
771  * @ring_buf_size:	SRNG buffer size
772  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
773  * Return: 0 on success; error code on failure
774  */
775 int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng,
776 	int hal_ring_type, int ring_buf_size,
777 	struct htt_rx_ring_tlv_filter *htt_tlv_filter)
778 {
779 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
780 	struct dp_htt_htc_pkt *pkt;
781 	qdf_nbuf_t htt_msg;
782 	uint32_t *msg_word;
783 	struct hal_srng_params srng_params;
784 	uint32_t htt_ring_type, htt_ring_id;
785 	uint32_t tlv_filter;
786 
787 	htt_msg = qdf_nbuf_alloc(soc->osdev,
788 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
789 	/* reserve room for the HTC header */
790 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
791 	if (!htt_msg)
792 		goto fail0;
793 
794 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
795 
796 	switch (hal_ring_type) {
797 	case RXDMA_BUF:
798 #if QCA_HOST2FW_RXBUF_RING
799 		htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
800 		htt_ring_type = HTT_SW_TO_SW_RING;
801 #else
802 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
803 		htt_ring_type = HTT_SW_TO_HW_RING;
804 #endif
805 		break;
806 	case RXDMA_MONITOR_BUF:
807 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
808 		htt_ring_type = HTT_SW_TO_HW_RING;
809 		break;
810 	case RXDMA_MONITOR_STATUS:
811 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
812 		htt_ring_type = HTT_SW_TO_HW_RING;
813 		break;
814 	case RXDMA_MONITOR_DST:
815 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
816 		htt_ring_type = HTT_HW_TO_SW_RING;
817 		break;
818 	case RXDMA_MONITOR_DESC:
819 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
820 		htt_ring_type = HTT_SW_TO_HW_RING;
821 		break;
822 	case RXDMA_DST:
823 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
824 		htt_ring_type = HTT_HW_TO_SW_RING;
825 		break;
826 
827 	default:
828 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
829 			"%s: Ring currently not supported", __func__);
830 		goto fail1;
831 	}
832 
833 	/*
834 	 * Set the length of the message.
835 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
836 	 * separately during the below call to qdf_nbuf_push_head.
837 	 * The contribution from the HTC header is added separately inside HTC.
838 	 */
839 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
840 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
841 			"%s: Failed to expand head for RX Ring Cfg msg",
842 			__func__);
843 		goto fail1; /* failure */
844 	}
845 
846 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
847 
848 	/* rewind beyond alignment pad to get to the HTC header reserved area */
849 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
850 
851 	/* word 0 */
852 	*msg_word = 0;
853 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
854 
855 	/*
856 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
857 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
858 	 */
859 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
860 			htt_ring_type == HTT_SW_TO_HW_RING)
861 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
862 						DP_SW2HW_MACID(pdev_id));
863 
864 	/* TODO: Discuss with FW on changing this to unique ID and using
865 	 * htt_ring_type to send the type of ring
866 	 */
867 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
868 
869 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
870 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
871 
872 	HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
873 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
874 
875 	/* word 1 */
876 	msg_word++;
877 	*msg_word = 0;
878 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
879 		ring_buf_size);
880 
881 	/* word 2 */
882 	msg_word++;
883 	*msg_word = 0;
884 
885 	if (htt_tlv_filter->enable_fp) {
886 		/* TYPE: MGMT */
887 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
888 			FP, MGMT, 0000,
889 			(htt_tlv_filter->fp_mgmt_filter &
890 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
891 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
892 			FP, MGMT, 0001,
893 			(htt_tlv_filter->fp_mgmt_filter &
894 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
895 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
896 			FP, MGMT, 0010,
897 			(htt_tlv_filter->fp_mgmt_filter &
898 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
899 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
900 			FP, MGMT, 0011,
901 			(htt_tlv_filter->fp_mgmt_filter &
902 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
903 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
904 			FP, MGMT, 0100,
905 			(htt_tlv_filter->fp_mgmt_filter &
906 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
907 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
908 			FP, MGMT, 0101,
909 			(htt_tlv_filter->fp_mgmt_filter &
910 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
911 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
912 			FP, MGMT, 0110,
913 			(htt_tlv_filter->fp_mgmt_filter &
914 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
915 		/* reserved */
916 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
917 			MGMT, 0111,
918 			(htt_tlv_filter->fp_mgmt_filter &
919 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
920 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
921 			FP, MGMT, 1000,
922 			(htt_tlv_filter->fp_mgmt_filter &
923 			FILTER_MGMT_BEACON) ? 1 : 0);
924 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
925 			FP, MGMT, 1001,
926 			(htt_tlv_filter->fp_mgmt_filter &
927 			FILTER_MGMT_ATIM) ? 1 : 0);
928 	}
929 
930 	if (htt_tlv_filter->enable_md) {
931 			/* TYPE: MGMT */
932 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
933 			MD, MGMT, 0000,
934 			(htt_tlv_filter->md_mgmt_filter &
935 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
936 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
937 			MD, MGMT, 0001,
938 			(htt_tlv_filter->md_mgmt_filter &
939 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
940 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
941 			MD, MGMT, 0010,
942 			(htt_tlv_filter->md_mgmt_filter &
943 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
944 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
945 			MD, MGMT, 0011,
946 			(htt_tlv_filter->md_mgmt_filter &
947 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
948 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
949 			MD, MGMT, 0100,
950 			(htt_tlv_filter->md_mgmt_filter &
951 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
952 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
953 			MD, MGMT, 0101,
954 			(htt_tlv_filter->md_mgmt_filter &
955 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
956 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
957 			MD, MGMT, 0110,
958 			(htt_tlv_filter->md_mgmt_filter &
959 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
960 		/* reserved */
961 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
962 			MGMT, 0111,
963 			(htt_tlv_filter->md_mgmt_filter &
964 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
965 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
966 			MD, MGMT, 1000,
967 			(htt_tlv_filter->md_mgmt_filter &
968 			FILTER_MGMT_BEACON) ? 1 : 0);
969 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
970 			MD, MGMT, 1001,
971 			(htt_tlv_filter->md_mgmt_filter &
972 			FILTER_MGMT_ATIM) ? 1 : 0);
973 	}
974 
975 	if (htt_tlv_filter->enable_mo) {
976 		/* TYPE: MGMT */
977 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
978 			MO, MGMT, 0000,
979 			(htt_tlv_filter->mo_mgmt_filter &
980 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
981 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
982 			MO, MGMT, 0001,
983 			(htt_tlv_filter->mo_mgmt_filter &
984 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
985 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
986 			MO, MGMT, 0010,
987 			(htt_tlv_filter->mo_mgmt_filter &
988 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
989 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
990 			MO, MGMT, 0011,
991 			(htt_tlv_filter->mo_mgmt_filter &
992 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
993 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
994 			MO, MGMT, 0100,
995 			(htt_tlv_filter->mo_mgmt_filter &
996 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
997 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
998 			MO, MGMT, 0101,
999 			(htt_tlv_filter->mo_mgmt_filter &
1000 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1001 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1002 			MO, MGMT, 0110,
1003 			(htt_tlv_filter->mo_mgmt_filter &
1004 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1005 		/* reserved */
1006 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
1007 			MGMT, 0111,
1008 			(htt_tlv_filter->mo_mgmt_filter &
1009 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1010 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1011 			MO, MGMT, 1000,
1012 			(htt_tlv_filter->mo_mgmt_filter &
1013 			FILTER_MGMT_BEACON) ? 1 : 0);
1014 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1015 			MO, MGMT, 1001,
1016 			(htt_tlv_filter->mo_mgmt_filter &
1017 			FILTER_MGMT_ATIM) ? 1 : 0);
1018 	}
1019 
1020 	/* word 3 */
1021 	msg_word++;
1022 	*msg_word = 0;
1023 
1024 	if (htt_tlv_filter->enable_fp) {
1025 		/* TYPE: MGMT */
1026 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1027 			FP, MGMT, 1010,
1028 			(htt_tlv_filter->fp_mgmt_filter &
1029 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1030 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1031 			FP, MGMT, 1011,
1032 			(htt_tlv_filter->fp_mgmt_filter &
1033 			FILTER_MGMT_AUTH) ? 1 : 0);
1034 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1035 			FP, MGMT, 1100,
1036 			(htt_tlv_filter->fp_mgmt_filter &
1037 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1038 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1039 			FP, MGMT, 1101,
1040 			(htt_tlv_filter->fp_mgmt_filter &
1041 			FILTER_MGMT_ACTION) ? 1 : 0);
1042 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1043 			FP, MGMT, 1110,
1044 			(htt_tlv_filter->fp_mgmt_filter &
1045 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1046 		/* reserved*/
1047 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1048 			MGMT, 1111,
1049 			(htt_tlv_filter->fp_mgmt_filter &
1050 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1051 	}
1052 
1053 	if (htt_tlv_filter->enable_md) {
1054 			/* TYPE: MGMT */
1055 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1056 			MD, MGMT, 1010,
1057 			(htt_tlv_filter->md_mgmt_filter &
1058 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1059 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1060 			MD, MGMT, 1011,
1061 			(htt_tlv_filter->md_mgmt_filter &
1062 			FILTER_MGMT_AUTH) ? 1 : 0);
1063 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1064 			MD, MGMT, 1100,
1065 			(htt_tlv_filter->md_mgmt_filter &
1066 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1067 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1068 			MD, MGMT, 1101,
1069 			(htt_tlv_filter->md_mgmt_filter &
1070 			FILTER_MGMT_ACTION) ? 1 : 0);
1071 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1072 			MD, MGMT, 1110,
1073 			(htt_tlv_filter->md_mgmt_filter &
1074 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1075 	}
1076 
1077 	if (htt_tlv_filter->enable_mo) {
1078 		/* TYPE: MGMT */
1079 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1080 			MO, MGMT, 1010,
1081 			(htt_tlv_filter->mo_mgmt_filter &
1082 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1083 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1084 			MO, MGMT, 1011,
1085 			(htt_tlv_filter->mo_mgmt_filter &
1086 			FILTER_MGMT_AUTH) ? 1 : 0);
1087 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1088 			MO, MGMT, 1100,
1089 			(htt_tlv_filter->mo_mgmt_filter &
1090 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1091 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1092 			MO, MGMT, 1101,
1093 			(htt_tlv_filter->mo_mgmt_filter &
1094 			FILTER_MGMT_ACTION) ? 1 : 0);
1095 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1096 			MO, MGMT, 1110,
1097 			(htt_tlv_filter->mo_mgmt_filter &
1098 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1099 		/* reserved*/
1100 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1101 			MGMT, 1111,
1102 			(htt_tlv_filter->mo_mgmt_filter &
1103 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1104 	}
1105 
1106 	/* word 4 */
1107 	msg_word++;
1108 	*msg_word = 0;
1109 
1110 	if (htt_tlv_filter->enable_fp) {
1111 		/* TYPE: CTRL */
1112 		/* reserved */
1113 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1114 			CTRL, 0000,
1115 			(htt_tlv_filter->fp_ctrl_filter &
1116 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1117 		/* reserved */
1118 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1119 			CTRL, 0001,
1120 			(htt_tlv_filter->fp_ctrl_filter &
1121 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1122 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1123 			CTRL, 0010,
1124 			(htt_tlv_filter->fp_ctrl_filter &
1125 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1126 		/* reserved */
1127 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1128 			CTRL, 0011,
1129 			(htt_tlv_filter->fp_ctrl_filter &
1130 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1131 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1132 			CTRL, 0100,
1133 			(htt_tlv_filter->fp_ctrl_filter &
1134 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1135 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1136 			CTRL, 0101,
1137 			(htt_tlv_filter->fp_ctrl_filter &
1138 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1139 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1140 			CTRL, 0110,
1141 			(htt_tlv_filter->fp_ctrl_filter &
1142 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1143 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1144 			CTRL, 0111,
1145 			(htt_tlv_filter->fp_ctrl_filter &
1146 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1147 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1148 			CTRL, 1000,
1149 			(htt_tlv_filter->fp_ctrl_filter &
1150 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1151 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1152 			CTRL, 1001,
1153 			(htt_tlv_filter->fp_ctrl_filter &
1154 			FILTER_CTRL_BA) ? 1 : 0);
1155 	}
1156 
1157 	if (htt_tlv_filter->enable_md) {
1158 		/* TYPE: CTRL */
1159 		/* reserved */
1160 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1161 			CTRL, 0000,
1162 			(htt_tlv_filter->md_ctrl_filter &
1163 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1164 		/* reserved */
1165 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1166 			CTRL, 0001,
1167 			(htt_tlv_filter->md_ctrl_filter &
1168 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1169 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1170 			CTRL, 0010,
1171 			(htt_tlv_filter->md_ctrl_filter &
1172 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1173 		/* reserved */
1174 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1175 			CTRL, 0011,
1176 			(htt_tlv_filter->md_ctrl_filter &
1177 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1178 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1179 			CTRL, 0100,
1180 			(htt_tlv_filter->md_ctrl_filter &
1181 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1182 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1183 			CTRL, 0101,
1184 			(htt_tlv_filter->md_ctrl_filter &
1185 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1186 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1187 			CTRL, 0110,
1188 			(htt_tlv_filter->md_ctrl_filter &
1189 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1190 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1191 			CTRL, 0111,
1192 			(htt_tlv_filter->md_ctrl_filter &
1193 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1194 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1195 			CTRL, 1000,
1196 			(htt_tlv_filter->md_ctrl_filter &
1197 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1198 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1199 			CTRL, 1001,
1200 			(htt_tlv_filter->md_ctrl_filter &
1201 			FILTER_CTRL_BA) ? 1 : 0);
1202 	}
1203 
1204 	if (htt_tlv_filter->enable_mo) {
1205 		/* TYPE: CTRL */
1206 		/* reserved */
1207 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1208 			CTRL, 0000,
1209 			(htt_tlv_filter->mo_ctrl_filter &
1210 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1211 		/* reserved */
1212 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1213 			CTRL, 0001,
1214 			(htt_tlv_filter->mo_ctrl_filter &
1215 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1216 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1217 			CTRL, 0010,
1218 			(htt_tlv_filter->mo_ctrl_filter &
1219 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1220 		/* reserved */
1221 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1222 			CTRL, 0011,
1223 			(htt_tlv_filter->mo_ctrl_filter &
1224 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1225 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1226 			CTRL, 0100,
1227 			(htt_tlv_filter->mo_ctrl_filter &
1228 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1229 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1230 			CTRL, 0101,
1231 			(htt_tlv_filter->mo_ctrl_filter &
1232 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1233 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1234 			CTRL, 0110,
1235 			(htt_tlv_filter->mo_ctrl_filter &
1236 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1237 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1238 			CTRL, 0111,
1239 			(htt_tlv_filter->mo_ctrl_filter &
1240 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1241 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1242 			CTRL, 1000,
1243 			(htt_tlv_filter->mo_ctrl_filter &
1244 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1245 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1246 			CTRL, 1001,
1247 			(htt_tlv_filter->mo_ctrl_filter &
1248 			FILTER_CTRL_BA) ? 1 : 0);
1249 	}
1250 
1251 	/* word 5 */
1252 	msg_word++;
1253 	*msg_word = 0;
1254 	if (htt_tlv_filter->enable_fp) {
1255 		/* TYPE: CTRL */
1256 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1257 			CTRL, 1010,
1258 			(htt_tlv_filter->fp_ctrl_filter &
1259 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1260 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1261 			CTRL, 1011,
1262 			(htt_tlv_filter->fp_ctrl_filter &
1263 			FILTER_CTRL_RTS) ? 1 : 0);
1264 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1265 			CTRL, 1100,
1266 			(htt_tlv_filter->fp_ctrl_filter &
1267 			FILTER_CTRL_CTS) ? 1 : 0);
1268 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1269 			CTRL, 1101,
1270 			(htt_tlv_filter->fp_ctrl_filter &
1271 			FILTER_CTRL_ACK) ? 1 : 0);
1272 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1273 			CTRL, 1110,
1274 			(htt_tlv_filter->fp_ctrl_filter &
1275 			FILTER_CTRL_CFEND) ? 1 : 0);
1276 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1277 			CTRL, 1111,
1278 			(htt_tlv_filter->fp_ctrl_filter &
1279 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1280 		/* TYPE: DATA */
1281 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1282 			DATA, MCAST,
1283 			(htt_tlv_filter->fp_data_filter &
1284 			FILTER_DATA_MCAST) ? 1 : 0);
1285 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1286 			DATA, UCAST,
1287 			(htt_tlv_filter->fp_data_filter &
1288 			FILTER_DATA_UCAST) ? 1 : 0);
1289 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1290 			DATA, NULL,
1291 			(htt_tlv_filter->fp_data_filter &
1292 			FILTER_DATA_NULL) ? 1 : 0);
1293 	}
1294 
1295 	if (htt_tlv_filter->enable_md) {
1296 		/* TYPE: CTRL */
1297 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1298 			CTRL, 1010,
1299 			(htt_tlv_filter->md_ctrl_filter &
1300 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1301 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1302 			CTRL, 1011,
1303 			(htt_tlv_filter->md_ctrl_filter &
1304 			FILTER_CTRL_RTS) ? 1 : 0);
1305 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1306 			CTRL, 1100,
1307 			(htt_tlv_filter->md_ctrl_filter &
1308 			FILTER_CTRL_CTS) ? 1 : 0);
1309 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1310 			CTRL, 1101,
1311 			(htt_tlv_filter->md_ctrl_filter &
1312 			FILTER_CTRL_ACK) ? 1 : 0);
1313 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1314 			CTRL, 1110,
1315 			(htt_tlv_filter->md_ctrl_filter &
1316 			FILTER_CTRL_CFEND) ? 1 : 0);
1317 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1318 			CTRL, 1111,
1319 			(htt_tlv_filter->md_ctrl_filter &
1320 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1321 		/* TYPE: DATA */
1322 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1323 			DATA, MCAST,
1324 			(htt_tlv_filter->md_data_filter &
1325 			FILTER_DATA_MCAST) ? 1 : 0);
1326 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1327 			DATA, UCAST,
1328 			(htt_tlv_filter->md_data_filter &
1329 			FILTER_DATA_UCAST) ? 1 : 0);
1330 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1331 			DATA, NULL,
1332 			(htt_tlv_filter->md_data_filter &
1333 			FILTER_DATA_NULL) ? 1 : 0);
1334 	}
1335 
1336 	if (htt_tlv_filter->enable_mo) {
1337 		/* TYPE: CTRL */
1338 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1339 			CTRL, 1010,
1340 			(htt_tlv_filter->mo_ctrl_filter &
1341 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1342 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1343 			CTRL, 1011,
1344 			(htt_tlv_filter->mo_ctrl_filter &
1345 			FILTER_CTRL_RTS) ? 1 : 0);
1346 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1347 			CTRL, 1100,
1348 			(htt_tlv_filter->mo_ctrl_filter &
1349 			FILTER_CTRL_CTS) ? 1 : 0);
1350 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1351 			CTRL, 1101,
1352 			(htt_tlv_filter->mo_ctrl_filter &
1353 			FILTER_CTRL_ACK) ? 1 : 0);
1354 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1355 			CTRL, 1110,
1356 			(htt_tlv_filter->mo_ctrl_filter &
1357 			FILTER_CTRL_CFEND) ? 1 : 0);
1358 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1359 			CTRL, 1111,
1360 			(htt_tlv_filter->mo_ctrl_filter &
1361 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1362 		/* TYPE: DATA */
1363 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1364 			DATA, MCAST,
1365 			(htt_tlv_filter->mo_data_filter &
1366 			FILTER_DATA_MCAST) ? 1 : 0);
1367 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1368 			DATA, UCAST,
1369 			(htt_tlv_filter->mo_data_filter &
1370 			FILTER_DATA_UCAST) ? 1 : 0);
1371 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1372 			DATA, NULL,
1373 			(htt_tlv_filter->mo_data_filter &
1374 			FILTER_DATA_NULL) ? 1 : 0);
1375 	}
1376 
1377 	/* word 6 */
1378 	msg_word++;
1379 	*msg_word = 0;
1380 	tlv_filter = 0;
1381 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1382 		htt_tlv_filter->mpdu_start);
1383 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1384 		htt_tlv_filter->msdu_start);
1385 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1386 		htt_tlv_filter->packet);
1387 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1388 		htt_tlv_filter->msdu_end);
1389 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1390 		htt_tlv_filter->mpdu_end);
1391 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1392 		htt_tlv_filter->packet_header);
1393 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1394 		htt_tlv_filter->attention);
1395 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1396 		htt_tlv_filter->ppdu_start);
1397 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1398 		htt_tlv_filter->ppdu_end);
1399 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1400 		htt_tlv_filter->ppdu_end_user_stats);
1401 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1402 		PPDU_END_USER_STATS_EXT,
1403 		htt_tlv_filter->ppdu_end_user_stats_ext);
1404 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1405 		htt_tlv_filter->ppdu_end_status_done);
1406 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1407 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1408 		 htt_tlv_filter->header_per_msdu);
1409 
1410 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1411 
1412 	/* "response_required" field should be set if a HTT response message is
1413 	 * required after setting up the ring.
1414 	 */
1415 	pkt = htt_htc_pkt_alloc(soc);
1416 	if (!pkt)
1417 		goto fail1;
1418 
1419 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1420 
1421 	SET_HTC_PACKET_INFO_TX(
1422 		&pkt->htc_pkt,
1423 		dp_htt_h2t_send_complete_free_netbuf,
1424 		qdf_nbuf_data(htt_msg),
1425 		qdf_nbuf_len(htt_msg),
1426 		soc->htc_endpoint,
1427 		1); /* tag - not relevant here */
1428 
1429 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1430 	DP_HTT_SEND_HTC_PKT(soc, pkt);
1431 	return QDF_STATUS_SUCCESS;
1432 
1433 fail1:
1434 	qdf_nbuf_free(htt_msg);
1435 fail0:
1436 	return QDF_STATUS_E_FAILURE;
1437 }
1438 
1439 #if defined(CONFIG_WIN) && WDI_EVENT_ENABLE
1440 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1441 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1442 
1443 {
1444 	uint32_t pdev_id;
1445 	uint32_t *msg_word = NULL;
1446 	uint32_t msg_remain_len = 0;
1447 
1448 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1449 
1450 	/*COOKIE MSB*/
1451 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1452 
1453 	/* stats message length + 16 size of HTT header*/
1454 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1455 				(uint32_t)DP_EXT_MSG_LENGTH);
1456 
1457 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1458 			msg_word,  msg_remain_len,
1459 			WDI_NO_VAL, pdev_id);
1460 
1461 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1462 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1463 	}
1464 	/* Need to be freed here as WDI handler will
1465 	 * make a copy of pkt to send data to application
1466 	 */
1467 	qdf_nbuf_free(htt_msg);
1468 	return QDF_STATUS_SUCCESS;
1469 }
1470 #else
/* Stub used when WDI event delivery is not compiled in: reports no support
 * so the caller keeps ownership of htt_msg and processes it locally.
 */
static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
					struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
	return QDF_STATUS_E_NOSUPPORT;
}
1476 #endif
1477 
1478 /**
1479  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1480  * @htt_stats: htt stats info
1481  *
1482  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1483  * contains sub messages which are identified by a TLV header.
1484  * In this function we will process the stream of T2H messages and read all the
1485  * TLV contained in the message.
1486  *
 * The following cases have been taken care of:
1488  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1489  *		In this case the buffer will contain multiple tlvs.
1490  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1491  *		Only one tlv will be contained in the HTT message and this tag
1492  *		will extend onto the next buffer.
1493  * Case 3: When the buffer is the continuation of the previous message
1494  * Case 4: tlv length is 0. which will indicate the end of message
1495  *
1496  * return: void
1497  */
static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
					struct dp_soc *soc)
{
	htt_tlv_tag_t tlv_type = 0xff;
	qdf_nbuf_t htt_msg = NULL;
	uint32_t *msg_word;
	/* tlv_buf_head/tail track a heap reassembly buffer for a TLV that
	 * spans multiple T2H buffers; NULL head means "not mid-TLV".
	 */
	uint8_t *tlv_buf_head = NULL;
	uint8_t *tlv_buf_tail = NULL;
	uint32_t msg_remain_len = 0;
	uint32_t tlv_remain_len = 0;
	uint32_t *tlv_start;
	int cookie_val;
	int cookie_msb;
	int pdev_id;
	bool copy_stats = false;
	struct dp_pdev *pdev;

	/* Process node in the HTT message queue */
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
		!= NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		/* word 1 carries the cookie LSB; non-zero requests a WDI
		 * response instead of local print/copy handling
		 */
		cookie_val = *(msg_word + 1);
		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
					*(msg_word +
					HTT_T2H_EXT_STATS_TLV_START_OFFSET));

		if (cookie_val) {
			/* On success the callee has freed htt_msg; only on
			 * failure (e.g. WDI not compiled in) fall through to
			 * local processing of this same buffer.
			 */
			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
					== QDF_STATUS_SUCCESS) {
				continue;
			}
		}

		/* cookie MSB: low bits = pdev id, remaining bits flag that
		 * the stats should be copied into pdev state rather than
		 * printed
		 */
		cookie_msb = *(msg_word + 2);
		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
		/* NOTE(review): pdev_id is masked to 0..3 — confirm
		 * soc->pdev_list has at least 4 entries (MAX_PDEV_CNT).
		 */
		pdev = soc->pdev_list[pdev_id];

		if (cookie_msb >> 2) {
			copy_stats = true;
		}

		/* read 5th word */
		msg_word = msg_word + 4;
		msg_remain_len = qdf_min(htt_stats->msg_len,
				(uint32_t) DP_EXT_MSG_LENGTH);
		/* Keep processing the node till node length is 0 */
		while (msg_remain_len) {
			/*
			 * if message is not a continuation of previous message
			 * read the tlv type and tlv length
			 */
			if (!tlv_buf_head) {
				tlv_type = HTT_STATS_TLV_TAG_GET(
						*msg_word);
				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
						*msg_word);
			}

			/* Case 4: zero-length TLV marks end of the stream;
			 * drop any partial reassembly buffer and bail out
			 */
			if (tlv_remain_len == 0) {
				msg_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

				goto error;
			}

			/* TLV length field excludes its own header; account
			 * for it once, when first seeing this TLV
			 */
			if (!tlv_buf_head)
				tlv_remain_len += HTT_TLV_HDR_LEN;

			if ((tlv_remain_len <= msg_remain_len)) {
				/* Case 3: tail of a TLV that started in a
				 * previous buffer — finish the reassembly
				 */
				if (tlv_buf_head) {
					qdf_mem_copy(tlv_buf_tail,
							(uint8_t *)msg_word,
							tlv_remain_len);
					tlv_start = (uint32_t *)tlv_buf_head;
				} else {
					/* Case 1: TLV fully contained here */
					tlv_start = msg_word;
				}

				if (copy_stats)
					dp_htt_stats_copy_tag(pdev, tlv_type, tlv_start);
				else
					dp_htt_stats_print_tag(tlv_type, tlv_start);

				msg_remain_len -= tlv_remain_len;

				msg_word = (uint32_t *)
					(((uint8_t *)msg_word) +
					tlv_remain_len);

				tlv_remain_len = 0;

				/* reassembly buffer consumed; reset state */
				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

			} else { /* tlv_remain_len > msg_remain_len */
				/* Case 2 & 3: TLV extends past this buffer —
				 * stash what we have and continue with the
				 * next queued buffer
				 */
				if (!tlv_buf_head) {
					tlv_buf_head = qdf_mem_malloc(
							tlv_remain_len);

					if (!tlv_buf_head) {
						QDF_TRACE(QDF_MODULE_ID_TXRX,
								QDF_TRACE_LEVEL_ERROR,
								"Alloc failed");
						goto error;
					}

					tlv_buf_tail = tlv_buf_head;
				}

				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
						msg_remain_len);
				tlv_remain_len -= msg_remain_len;
				tlv_buf_tail += msg_remain_len;
			}
		}

		/* track how much of the overall stats stream remains after
		 * this extension-message chunk
		 */
		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
		}

		qdf_nbuf_free(htt_msg);
	}
	return;

error:
	/* free the buffer being processed plus everything still queued */
	qdf_nbuf_free(htt_msg);
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
			!= NULL)
		qdf_nbuf_free(htt_msg);
}
1639 
1640 void htt_t2h_stats_handler(void *context)
1641 {
1642 	struct dp_soc *soc = (struct dp_soc *)context;
1643 	struct htt_stats_context htt_stats;
1644 	uint32_t *msg_word;
1645 	qdf_nbuf_t htt_msg = NULL;
1646 	uint8_t done;
1647 	uint8_t rem_stats;
1648 
1649 	if (!soc || !qdf_atomic_read(&soc->cmn_init_done)) {
1650 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1651 			"soc: 0x%pK, init_done: %d", soc,
1652 			qdf_atomic_read(&soc->cmn_init_done));
1653 		return;
1654 	}
1655 
1656 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
1657 	qdf_nbuf_queue_init(&htt_stats.msg);
1658 
1659 	/* pull one completed stats from soc->htt_stats_msg and process */
1660 	qdf_spin_lock_bh(&soc->htt_stats.lock);
1661 	if (!soc->htt_stats.num_stats) {
1662 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
1663 		return;
1664 	}
1665 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
1666 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1667 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
1668 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
1669 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
1670 		/*
1671 		 * Done bit signifies that this is the last T2H buffer in the
1672 		 * stream of HTT EXT STATS message
1673 		 */
1674 		if (done)
1675 			break;
1676 	}
1677 	rem_stats = --soc->htt_stats.num_stats;
1678 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
1679 
1680 	dp_process_htt_stat_msg(&htt_stats, soc);
1681 	/* If there are more stats to process, schedule stats work again */
1682 	if (rem_stats)
1683 		qdf_sched_work(0, &soc->htt_stats.work);
1684 }
1685 
1686 /*
1687  * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU,
1688  * if a new peer id arrives in a PPDU
 * @pdev: DP pdev handle
1690  * @peer_id : peer unique identifier
1691  * @ppdu_info: per ppdu tlv structure
1692  *
1693  * return:user index to be populated
1694  */
1695 #ifdef FEATURE_PERPKT_INFO
1696 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
1697 						uint16_t peer_id,
1698 						struct ppdu_info *ppdu_info)
1699 {
1700 	uint8_t user_index = 0;
1701 	struct cdp_tx_completion_ppdu *ppdu_desc;
1702 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1703 
1704 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1705 
1706 	while ((user_index + 1) <= ppdu_info->last_user) {
1707 		ppdu_user_desc = &ppdu_desc->user[user_index];
1708 		if (ppdu_user_desc->peer_id != peer_id) {
1709 			user_index++;
1710 			continue;
1711 		} else {
1712 			/* Max users possible is 8 so user array index should
1713 			 * not exceed 7
1714 			 */
1715 			qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX);
1716 			return user_index;
1717 		}
1718 	}
1719 
1720 	ppdu_info->last_user++;
1721 	/* Max users possible is 8 so last user should not exceed 8 */
1722 	qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS);
1723 	return ppdu_info->last_user - 1;
1724 }
1725 
1726 /*
1727  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
1728  * pdev: DP pdev handle
1729  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
1730  * @ppdu_info: per ppdu tlv structure
1731  *
1732  * return:void
1733  */
static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
{
	uint16_t frame_type;
	uint16_t freq;
	struct dp_soc *soc = NULL;
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/* tag_buf walks fixed 32-bit word offsets defined by the
	 * htt_ppdu_stats_common_tlv layout; the increments below must match
	 * that layout exactly
	 */
	tag_buf += 2;
	ppdu_desc->num_users =
		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
	tag_buf++;
	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);

	/* Collapse the firmware frame type into the coarse CDP category */
	if ((frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) ||
			(frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU))
		ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
	else if ((frame_type == HTT_STATS_FTYPE_SGEN_MU_BAR) ||
		 (frame_type == HTT_STATS_FTYPE_SGEN_BAR))
		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
	else
		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;

	tag_buf += 2;
	ppdu_desc->tx_duration = *tag_buf;
	tag_buf += 3;
	ppdu_desc->ppdu_start_timestamp = *tag_buf;

	/* End timestamp is derived, not reported: start plus duration */
	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
					ppdu_desc->tx_duration;
	/* Ack time stamp is same as end time stamp*/
	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;

	tag_buf++;

	/* On a channel change, cache the new operating channel number via
	 * the control-path freq-to-channel conversion callback
	 */
	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
	if (freq != ppdu_desc->channel) {
		soc = pdev->soc;
		ppdu_desc->channel = freq;
		/* NOTE(review): checks soc and the callback but not
		 * soc->cdp_soc.ol_ops itself — confirm ol_ops is always
		 * non-NULL here
		 */
		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
			pdev->operating_channel =
		soc->cdp_soc.ol_ops->freq_to_channel(pdev->ctrl_pdev, freq);
	}

	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
}
1782 
1783 /*
1784  * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
1785  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
1786  * @ppdu_info: per ppdu tlv structure
1787  *
1788  * return:void
1789  */
static void dp_process_ppdu_stats_user_common_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct dp_peer *peer;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	/* NOTE(review): a USER_RATE_TLV getter is applied to a user_common
	 * TLV word — presumably the sw_peer_id bitfield layout matches
	 * across both TLVs; confirm against htt_ppdu_stats.h
	 */
	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);

	/* Find (or allocate) the per-user slot for this peer in the PPDU */
	curr_user_index =
		dp_get_ppdu_info_user_index(pdev,
					    peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];

	if (peer_id == DP_SCAN_PEER_ID) {
		/* Scan-frame completions carry no real peer; record only
		 * the originating vdev
		 */
		ppdu_desc->vdev_id =
			HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
	} else {
		/* Drop the TLV if the peer is no longer known to the host */
		peer = dp_peer_find_by_id(pdev->soc, peer_id);
		if (!peer)
			return;
	}

	ppdu_user_desc->peer_id = peer_id;

	tag_buf++;

	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
		ppdu_user_desc->delayed_ba = 1;
	}

	/* For multicast there is no per-MPDU ack, so tried == delivered */
	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
		ppdu_user_desc->is_mcast = true;
		ppdu_user_desc->mpdu_tried_mcast =
		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
	} else {
		ppdu_user_desc->mpdu_tried_ucast =
		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
	}

	tag_buf++;

	ppdu_user_desc->qos_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
	ppdu_user_desc->frame_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;

	/* With delayed BA the MPDU counts arrive in a later TLV; clear the
	 * provisional values recorded above
	 */
	if (ppdu_user_desc->delayed_ba) {
		ppdu_user_desc->mpdu_success = 0;
		ppdu_user_desc->mpdu_tried_mcast = 0;
		ppdu_user_desc->mpdu_tried_ucast = 0;
	}
}
1852 
1853 
1854 /**
1855  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
1856  * @pdev: DP pdev handle
1857  * @tag_buf: T2H message buffer carrying the user rate TLV
1858  * @ppdu_info: per ppdu tlv structure
1859  *
1860  * return:void
1861  */
1862 static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
1863 		uint32_t *tag_buf,
1864 		struct ppdu_info *ppdu_info)
1865 {
1866 	uint16_t peer_id;
1867 	struct dp_peer *peer;
1868 	struct cdp_tx_completion_ppdu *ppdu_desc;
1869 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1870 	uint8_t curr_user_index = 0;
1871 	struct dp_vdev *vdev;
1872 
1873 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1874 
1875 	tag_buf++;
1876 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
1877 
1878 	curr_user_index =
1879 		dp_get_ppdu_info_user_index(pdev,
1880 					    peer_id, ppdu_info);
1881 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1882 
1883 	if (peer_id == DP_SCAN_PEER_ID) {
1884 		vdev =
1885 		       dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
1886 							  ppdu_desc->vdev_id);
1887 		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
1888 			     DP_MAC_ADDR_LEN);
1889 	} else {
1890 		peer = dp_peer_find_by_id(pdev->soc, peer_id);
1891 		if (!peer)
1892 			return;
1893 		qdf_mem_copy(ppdu_user_desc->mac_addr,
1894 			     peer->mac_addr.raw, DP_MAC_ADDR_LEN);
1895 	}
1896 
1897 	ppdu_user_desc->peer_id = peer_id;
1898 
1899 	ppdu_user_desc->tid =
1900 		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
1901 
1902 	tag_buf += 2;
1903 
1904 	ppdu_user_desc->ru_tones =
1905 		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
1906 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
1907 
1908 	tag_buf += 2;
1909 
1910 	ppdu_user_desc->ppdu_type =
1911 		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
1912 
1913 	tag_buf++;
1914 	ppdu_user_desc->tx_rate = *tag_buf;
1915 
1916 	ppdu_user_desc->ltf_size =
1917 		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
1918 	ppdu_user_desc->stbc =
1919 		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
1920 	ppdu_user_desc->he_re =
1921 		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
1922 	ppdu_user_desc->txbf =
1923 		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
1924 	ppdu_user_desc->bw =
1925 		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
1926 	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
1927 	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
1928 	ppdu_user_desc->preamble =
1929 		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
1930 	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
1931 	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
1932 	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
1933 }
1934 
1935 /*
1936  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
1937  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
1938  * pdev: DP PDEV handle
1939  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
1940  * @ppdu_info: per ppdu tlv structure
1941  *
1942  * return:void
1943  */
1944 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
1945 		struct dp_pdev *pdev, uint32_t *tag_buf,
1946 		struct ppdu_info *ppdu_info)
1947 {
1948 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
1949 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
1950 
1951 	struct cdp_tx_completion_ppdu *ppdu_desc;
1952 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1953 	uint8_t curr_user_index = 0;
1954 	uint16_t peer_id;
1955 	struct dp_peer *peer;
1956 
1957 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1958 
1959 	tag_buf++;
1960 
1961 	peer_id =
1962 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
1963 
1964 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1965 
1966 	if (!peer)
1967 		return;
1968 
1969 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1970 
1971 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1972 	ppdu_user_desc->peer_id = peer_id;
1973 
1974 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
1975 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
1976 					CDP_BA_64_BIT_MAP_SIZE_DWORDS);
1977 }
1978 
1979 /*
1980  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
1981  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
1982  * soc: DP SOC handle
1983  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
1984  * @ppdu_info: per ppdu tlv structure
1985  *
1986  * return:void
1987  */
1988 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
1989 		struct dp_pdev *pdev, uint32_t *tag_buf,
1990 		struct ppdu_info *ppdu_info)
1991 {
1992 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
1993 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
1994 
1995 	struct cdp_tx_completion_ppdu *ppdu_desc;
1996 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1997 	uint8_t curr_user_index = 0;
1998 	uint16_t peer_id;
1999 	struct dp_peer *peer;
2000 
2001 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2002 
2003 	tag_buf++;
2004 
2005 	peer_id =
2006 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2007 
2008 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2009 
2010 	if (!peer)
2011 		return;
2012 
2013 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2014 
2015 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2016 	ppdu_user_desc->peer_id = peer_id;
2017 
2018 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2019 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2020 					CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2021 }
2022 
2023 /*
2024  * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
2025  * htt_ppdu_stats_user_cmpltn_common_tlv
2026  * soc: DP SOC handle
2027  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
2028  * @ppdu_info: per ppdu tlv structure
2029  *
2030  * return:void
2031  */
2032 static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
2033 		struct dp_pdev *pdev, uint32_t *tag_buf,
2034 		struct ppdu_info *ppdu_info)
2035 {
2036 	uint16_t peer_id;
2037 	struct dp_peer *peer;
2038 	struct cdp_tx_completion_ppdu *ppdu_desc;
2039 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2040 	uint8_t curr_user_index = 0;
2041 	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
2042 		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
2043 
2044 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2045 
2046 	tag_buf++;
2047 	peer_id =
2048 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
2049 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2050 
2051 	if (!peer)
2052 		return;
2053 
2054 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2055 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2056 	ppdu_user_desc->peer_id = peer_id;
2057 
2058 	ppdu_user_desc->completion_status =
2059 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
2060 				*tag_buf);
2061 
2062 	ppdu_user_desc->tid =
2063 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
2064 
2065 
2066 	tag_buf++;
2067 	if (qdf_likely(ppdu_user_desc->completion_status)) {
2068 		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
2069 		ppdu_user_desc->ack_rssi_valid = 1;
2070 	} else {
2071 		ppdu_user_desc->ack_rssi_valid = 0;
2072 	}
2073 
2074 	tag_buf++;
2075 
2076 	ppdu_user_desc->mpdu_success =
2077 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
2078 
2079 	tag_buf++;
2080 
2081 	ppdu_user_desc->long_retries =
2082 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
2083 
2084 	ppdu_user_desc->short_retries =
2085 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
2086 	ppdu_user_desc->retry_msdus =
2087 		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
2088 
2089 	ppdu_user_desc->is_ampdu =
2090 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
2091 	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
2092 
2093 }
2094 
2095 /*
2096  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
2097  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2098  * pdev: DP PDEV handle
2099  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2100  * @ppdu_info: per ppdu tlv structure
2101  *
2102  * return:void
2103  */
2104 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2105 		struct dp_pdev *pdev, uint32_t *tag_buf,
2106 		struct ppdu_info *ppdu_info)
2107 {
2108 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2109 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2110 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2111 	struct cdp_tx_completion_ppdu *ppdu_desc;
2112 	uint8_t curr_user_index = 0;
2113 	uint16_t peer_id;
2114 	struct dp_peer *peer;
2115 
2116 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2117 
2118 	tag_buf++;
2119 
2120 	peer_id =
2121 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2122 
2123 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2124 
2125 	if (!peer)
2126 		return;
2127 
2128 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2129 
2130 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2131 	ppdu_user_desc->peer_id = peer_id;
2132 
2133 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2134 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2135 			CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2136 }
2137 
2138 /*
2139  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
2140  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2141  * pdev: DP PDEV handle
2142  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2143  * @ppdu_info: per ppdu tlv structure
2144  *
2145  * return:void
2146  */
2147 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2148 		struct dp_pdev *pdev, uint32_t *tag_buf,
2149 		struct ppdu_info *ppdu_info)
2150 {
2151 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2152 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2153 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2154 	struct cdp_tx_completion_ppdu *ppdu_desc;
2155 	uint8_t curr_user_index = 0;
2156 	uint16_t peer_id;
2157 	struct dp_peer *peer;
2158 
2159 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2160 
2161 	tag_buf++;
2162 
2163 	peer_id =
2164 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2165 
2166 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2167 
2168 	if (!peer)
2169 		return;
2170 
2171 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2172 
2173 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2174 	ppdu_user_desc->peer_id = peer_id;
2175 
2176 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2177 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2178 			CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2179 }
2180 
2181 /*
2182  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
2183  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2184  * pdev: DP PDE handle
2185  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2186  * @ppdu_info: per ppdu tlv structure
2187  *
2188  * return:void
2189  */
2190 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2191 		struct dp_pdev *pdev, uint32_t *tag_buf,
2192 		struct ppdu_info *ppdu_info)
2193 {
2194 	uint16_t peer_id;
2195 	struct dp_peer *peer;
2196 	struct cdp_tx_completion_ppdu *ppdu_desc;
2197 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2198 	uint8_t curr_user_index = 0;
2199 
2200 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2201 
2202 	tag_buf += 2;
2203 	peer_id =
2204 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
2205 
2206 
2207 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2208 
2209 	if (!peer)
2210 		return;
2211 
2212 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2213 
2214 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2215 	ppdu_user_desc->peer_id = peer_id;
2216 
2217 	tag_buf++;
2218 	ppdu_user_desc->tid =
2219 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_GET(*tag_buf);
2220 	ppdu_user_desc->num_mpdu =
2221 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
2222 
2223 	ppdu_user_desc->num_msdu =
2224 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
2225 
2226 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
2227 
2228 	tag_buf += 2;
2229 	ppdu_user_desc->success_bytes = *tag_buf;
2230 
2231 }
2232 
2233 /*
2234  * dp_process_ppdu_stats_user_common_array_tlv: Process
2235  * htt_ppdu_stats_user_common_array_tlv
2236  * pdev: DP PDEV handle
2237  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2238  * @ppdu_info: per ppdu tlv structure
2239  *
2240  * return:void
2241  */
2242 static void dp_process_ppdu_stats_user_common_array_tlv(
2243 		struct dp_pdev *pdev, uint32_t *tag_buf,
2244 		struct ppdu_info *ppdu_info)
2245 {
2246 	uint32_t peer_id;
2247 	struct dp_peer *peer;
2248 	struct cdp_tx_completion_ppdu *ppdu_desc;
2249 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2250 	uint8_t curr_user_index = 0;
2251 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2252 
2253 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2254 
2255 	tag_buf++;
2256 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2257 	tag_buf += 3;
2258 	peer_id =
2259 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2260 
2261 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2262 
2263 	if (!peer) {
2264 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2265 			"Invalid peer");
2266 		return;
2267 	}
2268 
2269 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2270 
2271 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2272 
2273 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2274 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2275 
2276 	tag_buf++;
2277 
2278 	ppdu_user_desc->success_msdus =
2279 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2280 	ppdu_user_desc->retry_bytes =
2281 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2282 	tag_buf++;
2283 	ppdu_user_desc->failed_msdus =
2284 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
2285 }
2286 
2287 /*
2288  * dp_process_ppdu_stats_flush_tlv: Process
2289  * htt_ppdu_stats_flush_tlv
2290  * @pdev: DP PDEV handle
2291  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
2292  *
2293  * return:void
2294  */
2295 static void dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
2296 						uint32_t *tag_buf)
2297 {
2298 	uint32_t peer_id;
2299 	uint32_t drop_reason;
2300 	uint8_t tid;
2301 	uint32_t num_msdu;
2302 	struct dp_peer *peer;
2303 
2304 	tag_buf++;
2305 	drop_reason = *tag_buf;
2306 
2307 	tag_buf++;
2308 	num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
2309 
2310 	tag_buf++;
2311 	peer_id =
2312 		HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
2313 
2314 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2315 	if (!peer)
2316 		return;
2317 
2318 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
2319 
2320 	if (drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
2321 		DP_STATS_INC(peer, tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
2322 					num_msdu);
2323 	}
2324 }
2325 
2326 /*
2327  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
2328  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2329  * @pdev: DP PDEV handle
2330  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2331  * @length: tlv_length
2332  *
2333  * return:QDF_STATUS_SUCCESS if nbuf as to be freed in caller
2334  */
2335 static QDF_STATUS
2336 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
2337 					      qdf_nbuf_t tag_buf,
2338 					      uint32_t ppdu_id)
2339 {
2340 	uint32_t *nbuf_ptr;
2341 	uint8_t trim_size;
2342 
2343 	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
2344 	    (!pdev->bpr_enable))
2345 		return QDF_STATUS_SUCCESS;
2346 
2347 	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
2348 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
2349 		      qdf_nbuf_data(tag_buf));
2350 
2351 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
2352 		return QDF_STATUS_SUCCESS;
2353 
2354 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
2355 			    pdev->mgmtctrl_frm_info.mgmt_buf_len);
2356 
2357 	nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(
2358 				tag_buf, sizeof(ppdu_id));
2359 	*nbuf_ptr = ppdu_id;
2360 
2361 	if (pdev->bpr_enable) {
2362 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
2363 				     tag_buf, HTT_INVALID_PEER,
2364 				     WDI_NO_VAL, pdev->pdev_id);
2365 	}
2366 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2367 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
2368 				     tag_buf, HTT_INVALID_PEER,
2369 				     WDI_NO_VAL, pdev->pdev_id);
2370 	}
2371 
2372 	return QDF_STATUS_E_ALREADY;
2373 }
2374 
2375 /**
2376  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
2377  * @pdev: DP pdev handle
2378  * @tag_buf: TLV buffer
2379  * @tlv_len: length of tlv
2380  * @ppdu_info: per ppdu tlv structure
2381  *
2382  * return: void
2383  */
2384 static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
2385 		uint32_t tlv_len, struct ppdu_info *ppdu_info)
2386 {
2387 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2388 
2389 	switch (tlv_type) {
2390 	case HTT_PPDU_STATS_COMMON_TLV:
2391 		qdf_assert_always(tlv_len ==
2392 				sizeof(htt_ppdu_stats_common_tlv));
2393 		dp_process_ppdu_stats_common_tlv(pdev, tag_buf, ppdu_info);
2394 		break;
2395 	case HTT_PPDU_STATS_USR_COMMON_TLV:
2396 		qdf_assert_always(tlv_len ==
2397 				sizeof(htt_ppdu_stats_user_common_tlv));
2398 		dp_process_ppdu_stats_user_common_tlv(
2399 				pdev, tag_buf, ppdu_info);
2400 		break;
2401 	case HTT_PPDU_STATS_USR_RATE_TLV:
2402 		qdf_assert_always(tlv_len ==
2403 				sizeof(htt_ppdu_stats_user_rate_tlv));
2404 		dp_process_ppdu_stats_user_rate_tlv(pdev, tag_buf, ppdu_info);
2405 		break;
2406 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
2407 		qdf_assert_always(tlv_len ==
2408 				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv));
2409 		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2410 				pdev, tag_buf, ppdu_info);
2411 		break;
2412 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
2413 		qdf_assert_always(tlv_len ==
2414 				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv));
2415 		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2416 				pdev, tag_buf, ppdu_info);
2417 		break;
2418 	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
2419 		qdf_assert_always(tlv_len ==
2420 				sizeof(htt_ppdu_stats_user_cmpltn_common_tlv));
2421 		dp_process_ppdu_stats_user_cmpltn_common_tlv(
2422 				pdev, tag_buf, ppdu_info);
2423 		break;
2424 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
2425 		qdf_assert_always(tlv_len ==
2426 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv));
2427 		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2428 				pdev, tag_buf, ppdu_info);
2429 		break;
2430 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
2431 		qdf_assert_always(tlv_len ==
2432 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv));
2433 		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2434 				pdev, tag_buf, ppdu_info);
2435 		break;
2436 	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
2437 		qdf_assert_always(tlv_len ==
2438 			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv));
2439 		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2440 				pdev, tag_buf, ppdu_info);
2441 		break;
2442 	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
2443 		qdf_assert_always(tlv_len ==
2444 			sizeof(htt_ppdu_stats_usr_common_array_tlv_v));
2445 		dp_process_ppdu_stats_user_common_array_tlv(
2446 				pdev, tag_buf, ppdu_info);
2447 		break;
2448 	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
2449 		qdf_assert_always(tlv_len ==
2450 			sizeof(htt_ppdu_stats_flush_tlv));
2451 		dp_process_ppdu_stats_user_compltn_flush_tlv(
2452 				pdev, tag_buf);
2453 		break;
2454 	default:
2455 		break;
2456 	}
2457 }
2458 
2459 /**
2460  * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
2461  * to upper layer
2462  * @pdev: DP pdev handle
2463  * @ppdu_info: per PPDU TLV descriptor
2464  *
2465  * return: void
2466  */
2467 static
2468 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
2469 			      struct ppdu_info *ppdu_info)
2470 {
2471 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2472 	struct dp_peer *peer = NULL;
2473 	qdf_nbuf_t nbuf;
2474 	uint16_t i;
2475 
2476 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2477 		qdf_nbuf_data(ppdu_info->nbuf);
2478 
2479 	ppdu_desc->num_users = ppdu_info->last_user;
2480 	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
2481 
2482 	for (i = 0; i < ppdu_desc->num_users; i++) {
2483 
2484 
2485 		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
2486 		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
2487 
2488 		if (ppdu_desc->user[i].tid < CDP_DATA_TID_MAX) {
2489 			peer = dp_peer_find_by_id(pdev->soc,
2490 					ppdu_desc->user[i].peer_id);
2491 			/**
2492 			 * This check is to make sure peer is not deleted
2493 			 * after processing the TLVs.
2494 			 */
2495 			if (!peer)
2496 				continue;
2497 
2498 			dp_tx_stats_update(pdev->soc, peer,
2499 					&ppdu_desc->user[i],
2500 					ppdu_desc->ack_rssi);
2501 		}
2502 	}
2503 
2504 	/*
2505 	 * Remove from the list
2506 	 */
2507 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
2508 	nbuf = ppdu_info->nbuf;
2509 	pdev->list_depth--;
2510 	qdf_mem_free(ppdu_info);
2511 
2512 	qdf_assert_always(nbuf);
2513 
2514 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2515 		qdf_nbuf_data(nbuf);
2516 
2517 	/**
2518 	 * Deliver PPDU stats only for valid (acked) data frames if
2519 	 * sniffer mode is not enabled.
2520 	 * If sniffer mode is enabled, PPDU stats for all frames
2521 	 * including mgmt/control frames should be delivered to upper layer
2522 	 */
2523 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2524 		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc,
2525 				nbuf, HTT_INVALID_PEER,
2526 				WDI_NO_VAL, pdev->pdev_id);
2527 	} else {
2528 		if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
2529 				ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
2530 
2531 			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
2532 					pdev->soc, nbuf, HTT_INVALID_PEER,
2533 					WDI_NO_VAL, pdev->pdev_id);
2534 		} else
2535 			qdf_nbuf_free(nbuf);
2536 	}
2537 	return;
2538 }
2539 
2540 /**
2541  * dp_get_ppdu_desc(): Function to allocate new PPDU status
2542  * desc for new ppdu id
2543  * @pdev: DP pdev handle
2544  * @ppdu_id: PPDU unique identifier
2545  * @tlv_type: TLV type received
2546  *
2547  * return: ppdu_info per ppdu tlv structure
2548  */
2549 static
2550 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
2551 			uint8_t tlv_type)
2552 {
2553 	struct ppdu_info *ppdu_info = NULL;
2554 
2555 	/*
2556 	 * Find ppdu_id node exists or not
2557 	 */
2558 	TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) {
2559 
2560 		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
2561 			break;
2562 		}
2563 	}
2564 
2565 	if (ppdu_info) {
2566 		/**
2567 		 * if we get tlv_type that is already been processed for ppdu,
2568 		 * that means we got a new ppdu with same ppdu id.
2569 		 * Hence Flush the older ppdu
2570 		 */
2571 		if (ppdu_info->tlv_bitmap & (1 << tlv_type))
2572 			dp_ppdu_desc_deliver(pdev, ppdu_info);
2573 		else
2574 			return ppdu_info;
2575 	}
2576 
2577 	/**
2578 	 * Flush the head ppdu descriptor if ppdu desc list reaches max
2579 	 * threshold
2580 	 */
2581 	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
2582 		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
2583 		dp_ppdu_desc_deliver(pdev, ppdu_info);
2584 	}
2585 
2586 	/*
2587 	 * Allocate new ppdu_info node
2588 	 */
2589 	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
2590 	if (!ppdu_info)
2591 		return NULL;
2592 
2593 	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
2594 			sizeof(struct cdp_tx_completion_ppdu), 0, 4,
2595 			TRUE);
2596 	if (!ppdu_info->nbuf) {
2597 		qdf_mem_free(ppdu_info);
2598 		return NULL;
2599 	}
2600 
2601 	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf),
2602 			sizeof(struct cdp_tx_completion_ppdu));
2603 
2604 	if (qdf_nbuf_put_tail(ppdu_info->nbuf,
2605 			sizeof(struct cdp_tx_completion_ppdu)) == NULL) {
2606 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2607 				"No tailroom for HTT PPDU");
2608 		qdf_nbuf_free(ppdu_info->nbuf);
2609 		ppdu_info->nbuf = NULL;
2610 		ppdu_info->last_user = 0;
2611 		qdf_mem_free(ppdu_info);
2612 		return NULL;
2613 	}
2614 
2615 	/**
2616 	 * No lock is needed because all PPDU TLVs are processed in
2617 	 * same context and this list is updated in same context
2618 	 */
2619 	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
2620 			ppdu_info_list_elem);
2621 	pdev->list_depth++;
2622 	return ppdu_info;
2623 }
2624 
2625 /**
2626  * dp_htt_process_tlv(): Function to process each PPDU TLVs
2627  * @pdev: DP pdev handle
2628  * @htt_t2h_msg: HTT target to host message
2629  *
2630  * return: ppdu_info per ppdu tlv structure
2631  */
2632 
2633 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
2634 		qdf_nbuf_t htt_t2h_msg)
2635 {
2636 	uint32_t length;
2637 	uint32_t ppdu_id;
2638 	uint8_t tlv_type;
2639 	uint32_t tlv_length, tlv_bitmap_expected;
2640 	uint8_t *tlv_buf;
2641 	struct ppdu_info *ppdu_info = NULL;
2642 
2643 	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2644 
2645 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
2646 
2647 	msg_word = msg_word + 1;
2648 	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
2649 
2650 
2651 	msg_word = msg_word + 3;
2652 	while (length > 0) {
2653 		tlv_buf = (uint8_t *)msg_word;
2654 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
2655 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
2656 		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
2657 			pdev->stats.ppdu_stats_counter[tlv_type]++;
2658 
2659 		if (tlv_length == 0)
2660 			break;
2661 
2662 		tlv_length += HTT_TLV_HDR_LEN;
2663 
2664 		/**
2665 		 * Not allocating separate ppdu descriptor for MGMT Payload
2666 		 * TLV as this is sent as separate WDI indication and it
2667 		 * doesn't contain any ppdu information
2668 		 */
2669 		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
2670 			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
2671 			pdev->mgmtctrl_frm_info.mgmt_buf_len = tlv_length;
2672 			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
2673 			msg_word =
2674 				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2675 			length -= (tlv_length);
2676 			continue;
2677 		}
2678 
2679 		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type);
2680 		if (!ppdu_info)
2681 			return NULL;
2682 		ppdu_info->ppdu_id = ppdu_id;
2683 		ppdu_info->tlv_bitmap |= (1 << tlv_type);
2684 
2685 		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
2686 
2687 		/**
2688 		 * Increment pdev level tlv count to monitor
2689 		 * missing TLVs
2690 		 */
2691 		pdev->tlv_count++;
2692 		ppdu_info->last_tlv_cnt = pdev->tlv_count;
2693 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2694 		length -= (tlv_length);
2695 	}
2696 
2697 	if (!ppdu_info)
2698 		return NULL;
2699 
2700 	pdev->last_ppdu_id = ppdu_id;
2701 
2702 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
2703 
2704 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2705 		if (ppdu_info->is_ampdu)
2706 			tlv_bitmap_expected = HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP;
2707 	}
2708 
2709 	/**
2710 	 * Once all the TLVs for a given PPDU has been processed,
2711 	 * return PPDU status to be delivered to higher layer
2712 	 */
2713 	if (ppdu_info->tlv_bitmap == tlv_bitmap_expected)
2714 		return ppdu_info;
2715 
2716 	return NULL;
2717 }
2718 #endif /* FEATURE_PERPKT_INFO */
2719 
2720 /**
2721  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
2722  * @soc: DP SOC handle
2723  * @pdev_id: pdev id
2724  * @htt_t2h_msg: HTT message nbuf
2725  *
2726  * return:void
2727  */
2728 #if defined(WDI_EVENT_ENABLE)
2729 #ifdef FEATURE_PERPKT_INFO
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
	struct ppdu_info *ppdu_info = NULL;
	bool free_buf = true;

	if (!pdev)
		return true;

	/* Nothing consumes PPDU stats unless one of these modes is on */
	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
	    !pdev->mcopy_mode && !pdev->bpr_enable)
		return free_buf;

	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);

	/* A mgmt payload TLV was recorded during TLV processing: deliver
	 * it via WDI. A non-SUCCESS return means the callee kept ownership
	 * of the nbuf, so the caller must not free it.
	 */
	if (pdev->mgmtctrl_frm_info.mgmt_buf) {
		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
		    (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
		    QDF_STATUS_SUCCESS)
			free_buf = false;

		pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
		pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
		pdev->mgmtctrl_frm_info.ppdu_id = 0;
	}

	/* Non-NULL ppdu_info means all expected TLVs have arrived */
	if (ppdu_info)
		dp_ppdu_desc_deliver(pdev, ppdu_info);

	return free_buf;
}
2762 #else
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	/* FEATURE_PERPKT_INFO disabled: caller always frees the nbuf */
	return true;
}
2768 #endif
2769 #endif
2770 
2771 /**
2772  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2773  * @soc: DP SOC handle
2774  * @htt_t2h_msg: HTT message nbuf
2775  *
2776  * return:void
2777  */
2778 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
2779 		qdf_nbuf_t htt_t2h_msg)
2780 {
2781 	uint8_t done;
2782 	qdf_nbuf_t msg_copy;
2783 	uint32_t *msg_word;
2784 
2785 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2786 	msg_word = msg_word + 3;
2787 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2788 
2789 	/*
2790 	 * HTT EXT stats response comes as stream of TLVs which span over
2791 	 * multiple T2H messages.
2792 	 * The first message will carry length of the response.
2793 	 * For rest of the messages length will be zero.
2794 	 *
2795 	 * Clone the T2H message buffer and store it in a list to process
2796 	 * it later.
2797 	 *
2798 	 * The original T2H message buffers gets freed in the T2H HTT event
2799 	 * handler
2800 	 */
2801 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
2802 
2803 	if (!msg_copy) {
2804 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2805 				"T2H messge clone failed for HTT EXT STATS");
2806 		goto error;
2807 	}
2808 
2809 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2810 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
2811 	/*
2812 	 * Done bit signifies that this is the last T2H buffer in the stream of
2813 	 * HTT EXT STATS message
2814 	 */
2815 	if (done) {
2816 		soc->htt_stats.num_stats++;
2817 		qdf_sched_work(0, &soc->htt_stats.work);
2818 	}
2819 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2820 
2821 	return;
2822 
2823 error:
2824 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2825 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
2826 			!= NULL) {
2827 		qdf_nbuf_free(msg_copy);
2828 	}
2829 	soc->htt_stats.num_stats = 0;
2830 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2831 	return;
2832 
2833 }
2834 
2835 /*
2836  * htt_soc_attach_target() - SOC level HTT setup
2837  * @htt_soc:	HTT SOC handle
2838  *
2839  * Return: 0 on success; error code on failure
2840  */
2841 int htt_soc_attach_target(void *htt_soc)
2842 {
2843 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
2844 
2845 	return htt_h2t_ver_req_msg(soc);
2846 }
2847 
2848 
2849 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
2850 /*
2851  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
2852  * @htt_soc:	 HTT SOC handle
2853  * @msg_word:    Pointer to payload
2854  * @htt_t2h_msg: HTT msg nbuf
2855  *
2856  * Return: True if buffer should be freed by caller.
2857  */
2858 static bool
2859 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
2860 				uint32_t *msg_word,
2861 				qdf_nbuf_t htt_t2h_msg)
2862 {
2863 	u_int8_t pdev_id;
2864 	bool free_buf;
2865 	qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
2866 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2867 		"received HTT_T2H_MSG_TYPE_PPDU_STATS_IND");
2868 	pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
2869 	pdev_id = DP_HW2SW_MACID(pdev_id);
2870 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
2871 					      htt_t2h_msg);
2872 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
2873 		htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
2874 		pdev_id);
2875 	return free_buf;
2876 }
2877 #else
2878 static bool
2879 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
2880 				uint32_t *msg_word,
2881 				qdf_nbuf_t htt_t2h_msg)
2882 {
2883 	return true;
2884 }
2885 #endif
2886 
#if defined(WDI_EVENT_ENABLE) && \
		!defined(REMOVE_PKT_LOG) && defined(CONFIG_WIN)
/*
 * dp_pktlog_msg_handler() - Pktlog msg handler
 * @soc:	 HTT SOC handle
 * @msg_word:    Pointer to payload (first 32-bit word of the HTT message)
 *
 * Extracts the pdev id from the message header and forwards the pktlog
 * payload (starting right after the header word) to all
 * WDI_EVENT_OFFLOAD_ALL subscribers.
 *
 * Return: None
 */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
				uint32_t *msg_word)
{
	uint8_t pdev_id;
	uint32_t *pl_hdr;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		"received HTT_T2H_MSG_TYPE_PKTLOG");
	/* FW reports a HW mac id; convert it to the SW pdev id used by DP */
	pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
	pdev_id = DP_HW2SW_MACID(pdev_id);
	/* pktlog header begins immediately after the HTT message header */
	pl_hdr = (msg_word + 1);
	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
		pdev_id);
}
#else
/* Stub used when pktlog support is compiled out */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
				uint32_t *msg_word)
{
}
#endif
2918 
2919 /*
2920  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
2921  * @context:	Opaque context (HTT SOC handle)
2922  * @pkt:	HTC packet
2923  */
2924 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
2925 {
2926 	struct htt_soc *soc = (struct htt_soc *) context;
2927 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
2928 	u_int32_t *msg_word;
2929 	enum htt_t2h_msg_type msg_type;
2930 	bool free_buf = true;
2931 
2932 	/* check for successful message reception */
2933 	if (pkt->Status != QDF_STATUS_SUCCESS) {
2934 		if (pkt->Status != QDF_STATUS_E_CANCELED)
2935 			soc->stats.htc_err_cnt++;
2936 
2937 		qdf_nbuf_free(htt_t2h_msg);
2938 		return;
2939 	}
2940 
2941 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
2942 
2943 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
2944 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
2945 	switch (msg_type) {
2946 	case HTT_T2H_MSG_TYPE_PEER_MAP:
2947 		{
2948 			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
2949 			u_int8_t *peer_mac_addr;
2950 			u_int16_t peer_id;
2951 			u_int16_t hw_peer_id;
2952 			u_int8_t vdev_id;
2953 
2954 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
2955 			hw_peer_id =
2956 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
2957 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
2958 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
2959 				(u_int8_t *) (msg_word+1),
2960 				&mac_addr_deswizzle_buf[0]);
2961 			QDF_TRACE(QDF_MODULE_ID_TXRX,
2962 				QDF_TRACE_LEVEL_INFO,
2963 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
2964 				peer_id, vdev_id);
2965 
2966 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
2967 						vdev_id, peer_mac_addr);
2968 			break;
2969 		}
2970 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
2971 		{
2972 			u_int16_t peer_id;
2973 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
2974 
2975 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id);
2976 			break;
2977 		}
2978 	case HTT_T2H_MSG_TYPE_SEC_IND:
2979 		{
2980 			u_int16_t peer_id;
2981 			enum htt_sec_type sec_type;
2982 			int is_unicast;
2983 
2984 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
2985 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
2986 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
2987 			/* point to the first part of the Michael key */
2988 			msg_word++;
2989 			dp_rx_sec_ind_handler(
2990 				soc->dp_soc, peer_id, sec_type, is_unicast,
2991 				msg_word, msg_word + 2);
2992 			break;
2993 		}
2994 
2995 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
2996 		{
2997 			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
2998 							     htt_t2h_msg);
2999 			break;
3000 		}
3001 
3002 	case HTT_T2H_MSG_TYPE_PKTLOG:
3003 		{
3004 			dp_pktlog_msg_handler(soc, msg_word);
3005 			break;
3006 		}
3007 
3008 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
3009 		{
3010 			htc_pm_runtime_put(soc->htc_soc);
3011 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
3012 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
3013 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
3014 				"target uses HTT version %d.%d; host uses %d.%d",
3015 				soc->tgt_ver.major, soc->tgt_ver.minor,
3016 				HTT_CURRENT_VERSION_MAJOR,
3017 				HTT_CURRENT_VERSION_MINOR);
3018 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
3019 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3020 					QDF_TRACE_LEVEL_ERROR,
3021 					"*** Incompatible host/target HTT versions!");
3022 			}
3023 			/* abort if the target is incompatible with the host */
3024 			qdf_assert(soc->tgt_ver.major ==
3025 				HTT_CURRENT_VERSION_MAJOR);
3026 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
3027 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3028 					QDF_TRACE_LEVEL_WARN,
3029 					"*** Warning: host/target HTT versions"
3030 					" are different, though compatible!");
3031 			}
3032 			break;
3033 		}
3034 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
3035 		{
3036 			uint16_t peer_id;
3037 			uint8_t tid;
3038 			uint8_t win_sz;
3039 			uint16_t status;
3040 			struct dp_peer *peer;
3041 
3042 			/*
3043 			 * Update REO Queue Desc with new values
3044 			 */
3045 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
3046 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
3047 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
3048 			peer = dp_peer_find_by_id(soc->dp_soc, peer_id);
3049 
3050 			/*
3051 			 * Window size needs to be incremented by 1
3052 			 * since fw needs to represent a value of 256
3053 			 * using just 8 bits
3054 			 */
3055 			if (peer) {
3056 				status = dp_addba_requestprocess_wifi3(peer,
3057 						0, tid, 0, win_sz + 1, 0xffff);
3058 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3059 					QDF_TRACE_LEVEL_INFO,
3060 					FL("PeerID %d BAW %d TID %d stat %d"),
3061 					peer_id, win_sz, tid, status);
3062 
3063 			} else {
3064 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3065 					QDF_TRACE_LEVEL_ERROR,
3066 					FL("Peer not found peer id %d"),
3067 					peer_id);
3068 			}
3069 			break;
3070 		}
3071 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
3072 		{
3073 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
3074 			break;
3075 		}
3076 	default:
3077 		break;
3078 	};
3079 
3080 	/* Free the indication buffer */
3081 	if (free_buf)
3082 		qdf_nbuf_free(htt_t2h_msg);
3083 }
3084 
3085 /*
3086  * dp_htt_h2t_full() - Send full handler (called from HTC)
3087  * @context:	Opaque context (HTT SOC handle)
3088  * @pkt:	HTC packet
3089  *
3090  * Return: enum htc_send_full_action
3091  */
3092 static enum htc_send_full_action
3093 dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
3094 {
3095 	return HTC_SEND_FULL_KEEP;
3096 }
3097 
3098 /*
3099  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
3100  * @context:	Opaque context (HTT SOC handle)
3101  * @nbuf:	nbuf containing T2H message
3102  * @pipe_id:	HIF pipe ID
3103  *
3104  * Return: QDF_STATUS
3105  *
3106  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3107  * will be used for packet log and other high-priority HTT messages. Proper
3108  * HTC connection to be added later once required FW changes are available
3109  */
3110 static QDF_STATUS
3111 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3112 {
3113 	A_STATUS rc = QDF_STATUS_SUCCESS;
3114 	HTC_PACKET htc_pkt;
3115 
3116 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3117 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3118 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3119 	htc_pkt.pPktContext = (void *)nbuf;
3120 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3121 
3122 	return rc;
3123 }
3124 
3125 /*
3126  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
3127  * @htt_soc:	HTT SOC handle
3128  *
3129  * Return: 0 on success; error code on failure
3130  */
3131 static int
3132 htt_htc_soc_attach(struct htt_soc *soc)
3133 {
3134 	struct htc_service_connect_req connect;
3135 	struct htc_service_connect_resp response;
3136 	A_STATUS status;
3137 	struct dp_soc *dpsoc = soc->dp_soc;
3138 
3139 	qdf_mem_set(&connect, sizeof(connect), 0);
3140 	qdf_mem_set(&response, sizeof(response), 0);
3141 
3142 	connect.pMetaData = NULL;
3143 	connect.MetaDataLength = 0;
3144 	connect.EpCallbacks.pContext = soc;
3145 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
3146 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3147 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
3148 
3149 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
3150 	connect.EpCallbacks.EpRecvRefill = NULL;
3151 
3152 	/* N/A, fill is done by HIF */
3153 	connect.EpCallbacks.RecvRefillWaterMark = 1;
3154 
3155 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
3156 	/*
3157 	 * Specify how deep to let a queue get before htc_send_pkt will
3158 	 * call the EpSendFull function due to excessive send queue depth.
3159 	 */
3160 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
3161 
3162 	/* disable flow control for HTT data message service */
3163 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
3164 
3165 	/* connect to control service */
3166 	connect.service_id = HTT_DATA_MSG_SVC;
3167 
3168 	status = htc_connect_service(soc->htc_soc, &connect, &response);
3169 
3170 	if (status != A_OK)
3171 		return QDF_STATUS_E_FAILURE;
3172 
3173 	soc->htc_endpoint = response.Endpoint;
3174 
3175 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
3176 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
3177 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
3178 
3179 	return 0; /* success */
3180 }
3181 
3182 /*
3183  * htt_soc_attach() - SOC level HTT initialization
3184  * @dp_soc:	Opaque Data path SOC handle
3185  * @ctrl_psoc:	Opaque ctrl SOC handle
3186  * @htc_soc:	SOC level HTC handle
3187  * @hal_soc:	Opaque HAL SOC handle
3188  * @osdev:	QDF device
3189  *
3190  * Return: HTT handle on success; NULL on failure
3191  */
3192 void *
3193 htt_soc_attach(void *dp_soc, void *ctrl_psoc, HTC_HANDLE htc_soc,
3194 	void *hal_soc, qdf_device_t osdev)
3195 {
3196 	struct htt_soc *soc;
3197 	int i;
3198 
3199 	soc = qdf_mem_malloc(sizeof(*soc));
3200 
3201 	if (!soc)
3202 		goto fail1;
3203 
3204 	soc->osdev = osdev;
3205 	soc->ctrl_psoc = ctrl_psoc;
3206 	soc->dp_soc = dp_soc;
3207 	soc->htc_soc = htc_soc;
3208 	soc->hal_soc = hal_soc;
3209 
3210 	/* TODO: See if any NSS related context is required in htt_soc */
3211 
3212 	soc->htt_htc_pkt_freelist = NULL;
3213 
3214 	if (htt_htc_soc_attach(soc))
3215 		goto fail2;
3216 
3217 	/* TODO: See if any Rx data specific intialization is required. For
3218 	 * MCL use cases, the data will be received as single packet and
3219 	 * should not required any descriptor or reorder handling
3220 	 */
3221 
3222 	HTT_TX_MUTEX_INIT(&soc->htt_tx_mutex);
3223 
3224 	/* pre-allocate some HTC_PACKET objects */
3225 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3226 		struct dp_htt_htc_pkt_union *pkt;
3227 		pkt = qdf_mem_malloc(sizeof(*pkt));
3228 		if (!pkt)
3229 			break;
3230 
3231 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3232 	}
3233 
3234 	return soc;
3235 
3236 fail2:
3237 	qdf_mem_free(soc);
3238 
3239 fail1:
3240 	return NULL;
3241 }
3242 
3243 
3244 /*
3245  * htt_soc_detach() - Detach SOC level HTT
3246  * @htt_soc:	HTT SOC handle
3247  */
3248 void
3249 htt_soc_detach(void *htt_soc)
3250 {
3251 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3252 
3253 	htt_htc_misc_pkt_pool_free(soc);
3254 	htt_htc_pkt_pool_free(soc);
3255 	HTT_TX_MUTEX_DESTROY(&soc->htt_tx_mutex);
3256 	qdf_mem_free(soc);
3257 }
3258 
3259 /**
3260  * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW
3261  * @pdev: DP PDEV handle
3262  * @stats_type_upload_mask: stats type requested by user
3263  * @config_param_0: extra configuration parameters
3264  * @config_param_1: extra configuration parameters
3265  * @config_param_2: extra configuration parameters
3266  * @config_param_3: extra configuration parameters
3267  * @mac_id: mac number
3268  *
3269  * return: QDF STATUS
3270  */
3271 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
3272 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
3273 		uint32_t config_param_1, uint32_t config_param_2,
3274 		uint32_t config_param_3, int cookie_val, int cookie_msb,
3275 		uint8_t mac_id)
3276 {
3277 	struct htt_soc *soc = pdev->soc->htt_handle;
3278 	struct dp_htt_htc_pkt *pkt;
3279 	qdf_nbuf_t msg;
3280 	uint32_t *msg_word;
3281 	uint8_t pdev_mask = 0;
3282 
3283 	msg = qdf_nbuf_alloc(
3284 			soc->osdev,
3285 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
3286 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
3287 
3288 	if (!msg)
3289 		return QDF_STATUS_E_NOMEM;
3290 
3291 	/*TODO:Add support for SOC stats
3292 	 * Bit 0: SOC Stats
3293 	 * Bit 1: Pdev stats for pdev id 0
3294 	 * Bit 2: Pdev stats for pdev id 1
3295 	 * Bit 3: Pdev stats for pdev id 2
3296 	 */
3297 	mac_id = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
3298 
3299 	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
3300 	/*
3301 	 * Set the length of the message.
3302 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3303 	 * separately during the below call to qdf_nbuf_push_head.
3304 	 * The contribution from the HTC header is added separately inside HTC.
3305 	 */
3306 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
3307 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3308 				"Failed to expand head for HTT_EXT_STATS");
3309 		qdf_nbuf_free(msg);
3310 		return QDF_STATUS_E_FAILURE;
3311 	}
3312 
3313 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3314 		"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
3315 		"config_param_1 %u\n config_param_2 %u\n"
3316 		"config_param_4 %u\n -------------",
3317 		__func__, __LINE__, cookie_val, config_param_0,
3318 		config_param_1, config_param_2,	config_param_3);
3319 
3320 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3321 
3322 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3323 	*msg_word = 0;
3324 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
3325 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
3326 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
3327 
3328 	/* word 1 */
3329 	msg_word++;
3330 	*msg_word = 0;
3331 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
3332 
3333 	/* word 2 */
3334 	msg_word++;
3335 	*msg_word = 0;
3336 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
3337 
3338 	/* word 3 */
3339 	msg_word++;
3340 	*msg_word = 0;
3341 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
3342 
3343 	/* word 4 */
3344 	msg_word++;
3345 	*msg_word = 0;
3346 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
3347 
3348 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
3349 
3350 	/* word 5 */
3351 	msg_word++;
3352 
3353 	/* word 6 */
3354 	msg_word++;
3355 	*msg_word = 0;
3356 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
3357 
3358 	/* word 7 */
3359 	msg_word++;
3360 	*msg_word = 0;
3361 	/*Using last 2 bits for pdev_id */
3362 	cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
3363 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
3364 
3365 	pkt = htt_htc_pkt_alloc(soc);
3366 	if (!pkt) {
3367 		qdf_nbuf_free(msg);
3368 		return QDF_STATUS_E_NOMEM;
3369 	}
3370 
3371 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3372 
3373 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
3374 			dp_htt_h2t_send_complete_free_netbuf,
3375 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
3376 			soc->htc_endpoint,
3377 			1); /* tag - not relevant here */
3378 
3379 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3380 	DP_HTT_SEND_HTC_PKT(soc, pkt);
3381 	return 0;
3382 }
3383 
/* This macro will revert once proper HTT header will define for
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in htt.h file
 * */
#if defined(WDI_EVENT_ENABLE)
/**
 * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
 * @pdev: DP PDEV handle
 * @stats_type_upload_mask: stats type requested by user
 * @mac_id: Mac id number
 *
 * return: QDF STATUS
 */
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	uint32_t *msg_word;
	uint8_t pdev_mask;
	qdf_nbuf_t msg;

	/* reserve headroom for the HTC header plus alignment padding */
	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer");
		qdf_assert(0);
		return QDF_STATUS_E_NOMEM;
	}

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Failed to expand head for HTT_CFG_STATS");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

	/* single-word message: type, pdev mask, requested TLV bitmask */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
			stats_type_upload_mask);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			soc->htc_endpoint,
			1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	DP_HTT_SEND_HTC_PKT(soc, pkt);
	return 0;
}
#endif
3469