xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision 87a8e4458319c60b618522e263ed900e36aab528)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <htt.h>
20 #include <hal_hw_headers.h>
21 #include <hal_api.h>
22 #include "dp_htt.h"
23 #include "dp_peer.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_rx_mon.h"
27 #include "htt_stats.h"
28 #include "htt_ppdu_stats.h"
29 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
30 #include "cdp_txrx_cmn_struct.h"
31 
32 #ifdef FEATURE_PERPKT_INFO
33 #include "dp_ratetable.h"
34 #endif
35 
36 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
37 
38 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
39 #define HTT_T2H_MAX_MSG_SIZE 2048
40 
41 #define HTT_MSG_BUF_SIZE(msg_bytes) \
42 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
43 
44 #define HTT_PID_BIT_MASK 0x3
45 
46 #define DP_EXT_MSG_LENGTH 2048
47 #define DP_HTT_SEND_HTC_PKT(soc, pkt)                            \
48 do {                                                             \
49 	if (htc_send_pkt(soc->htc_soc, &pkt->htc_pkt) ==         \
50 					QDF_STATUS_SUCCESS)      \
51 		htt_htc_misc_pkt_list_add(soc, pkt);             \
52 } while (0)
53 
54 #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
55 
56 /**
57  * Bitmap of HTT PPDU TLV types for Default mode
58  */
59 #define HTT_PPDU_DEFAULT_TLV_BITMAP \
60 	(1 << HTT_PPDU_STATS_COMMON_TLV) | \
61 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
62 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
63 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
64 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
65 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)
66 
67 /**
68  * Bitmap of HTT PPDU TLV types for Sniffer mode
69  */
70 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP \
71 	(1 << HTT_PPDU_STATS_COMMON_TLV) | \
72 	(1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \
73 	(1 << HTT_PPDU_STATS_USR_RATE_TLV) | \
74 	(1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \
75 	(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \
76 	(1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \
77 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \
78 	(1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \
79 	(1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV)
80 
81 #define HTT_FRAMECTRL_DATATYPE 0x08
82 #define HTT_PPDU_DESC_MAX_DEPTH 16
83 #define DP_SCAN_PEER_ID 0xFFFF
84 
85 /*
86  * dp_tx_stats_update() - Update per-peer statistics
87  * @soc: Datapath soc handle
88  * @peer: Datapath peer handle
89  * @ppdu: PPDU Descriptor
90  * @ack_rssi: RSSI of last ack received
91  *
92  * Return: None
93  */
94 #ifdef FEATURE_PERPKT_INFO
95 static inline void
96 dp_tx_rate_stats_update(struct dp_peer *peer,
97 			struct cdp_tx_completion_ppdu_user *ppdu)
98 {
99 	uint32_t ratekbps = 0;
100 	uint32_t ppdu_tx_rate = 0;
101 
102 	if (!peer || !ppdu)
103 		return;
104 
105 	dp_peer_stats_notify(peer);
106 
107 	ratekbps = dp_getrateindex(ppdu->mcs,
108 				   ppdu->nss,
109 				   ppdu->preamble,
110 				   ppdu->bw);
111 
112 	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);
113 
114 	if (!ratekbps)
115 		return;
116 
117 	dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
118 	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
119 	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
120 
121 	if (peer->vdev) {
122 		peer->vdev->stats.tx.last_tx_rate = ratekbps;
123 		peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
124 	}
125 }
126 
/*
 * dp_tx_stats_update() - Update per-peer statistics from a PPDU completion
 * @soc: Datapath soc handle
 * @peer: Datapath peer handle
 * @ppdu: Per-user PPDU completion descriptor
 * @ack_rssi: RSSI of last ack received
 *
 * Return: None
 */
static void dp_tx_stats_update(struct dp_soc *soc, struct dp_peer *peer,
		struct cdp_tx_completion_ppdu_user *ppdu, uint32_t ack_rssi)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	uint8_t preamble, mcs;
	uint16_t num_msdu;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;
	num_msdu = ppdu->num_msdu;

	/* If the peer statistics are already processed as part of
	 * per-MSDU completion handler, do not process these again in per-PPDU
	 * indications */
	if (soc->process_tx_status)
		return;

	/* Completion counters cover success, retry and failed bytes */
	DP_STATS_INC_PKT(peer, tx.comp_pkt,
			num_msdu, (ppdu->success_bytes +
				ppdu->retry_bytes + ppdu->failed_bytes));
	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], num_msdu);
	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
	/* ack RSSI is only meaningful for unicast frames */
	if (!(ppdu->is_mcast))
		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);

	DP_STATS_INC(peer, tx.retries,
			(ppdu->long_retries + ppdu->short_retries));
	/* Per-preamble MCS histogram: for each preamble type, an MCS index
	 * beyond the valid range for that preamble is folded into the last
	 * bucket (MAX_MCS-1); in-range indices go into their own bucket.
	 */
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));

	dp_tx_rate_stats_update(peer, ppdu);

	/* Last PER (%): fraction of unicast MSDUs that did not succeed */
	if (peer->stats.tx.ucast.num)
		peer->stats.tx.last_per = ((peer->stats.tx.ucast.num -
					peer->stats.tx.tx_success.num) * 100) /
					peer->stats.tx.ucast.num;

	/* Push the accumulated peer stats to the control plane, if a
	 * callback is registered
	 */
	if (soc->cdp_soc.ol_ops->update_dp_stats) {
		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
				&peer->stats, ppdu->peer_id,
				UPDATE_PEER_STATS);

	}
}
205 #endif
206 
207 /*
208  * htt_htc_pkt_alloc() - Allocate HTC packet buffer
209  * @htt_soc:	HTT SOC handle
210  *
211  * Return: Pointer to htc packet buffer
212  */
213 static struct dp_htt_htc_pkt *
214 htt_htc_pkt_alloc(struct htt_soc *soc)
215 {
216 	struct dp_htt_htc_pkt_union *pkt = NULL;
217 
218 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
219 	if (soc->htt_htc_pkt_freelist) {
220 		pkt = soc->htt_htc_pkt_freelist;
221 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
222 	}
223 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
224 
225 	if (pkt == NULL)
226 		pkt = qdf_mem_malloc(sizeof(*pkt));
227 	return &pkt->u.pkt; /* not actually a dereference */
228 }
229 
230 /*
231  * htt_htc_pkt_free() - Free HTC packet buffer
232  * @htt_soc:	HTT SOC handle
233  */
234 static void
235 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
236 {
237 	struct dp_htt_htc_pkt_union *u_pkt =
238 		(struct dp_htt_htc_pkt_union *)pkt;
239 
240 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
241 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
242 	soc->htt_htc_pkt_freelist = u_pkt;
243 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
244 }
245 
246 /*
247  * htt_htc_pkt_pool_free() - Free HTC packet pool
248  * @htt_soc:	HTT SOC handle
249  */
250 static void
251 htt_htc_pkt_pool_free(struct htt_soc *soc)
252 {
253 	struct dp_htt_htc_pkt_union *pkt, *next;
254 	pkt = soc->htt_htc_pkt_freelist;
255 	while (pkt) {
256 		next = pkt->u.next;
257 		qdf_mem_free(pkt);
258 		pkt = next;
259 	}
260 	soc->htt_htc_pkt_freelist = NULL;
261 }
262 
263 /*
264  * htt_htc_misc_pkt_list_trim() - trim misc list
265  * @htt_soc: HTT SOC handle
266  * @level: max no. of pkts in list
267  */
268 static void
269 htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
270 {
271 	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
272 	int i = 0;
273 	qdf_nbuf_t netbuf;
274 
275 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
276 	pkt = soc->htt_htc_pkt_misclist;
277 	while (pkt) {
278 		next = pkt->u.next;
279 		/* trim the out grown list*/
280 		if (++i > level) {
281 			netbuf =
282 				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
283 			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
284 			qdf_nbuf_free(netbuf);
285 			qdf_mem_free(pkt);
286 			pkt = NULL;
287 			if (prev)
288 				prev->u.next = NULL;
289 		}
290 		prev = pkt;
291 		pkt = next;
292 	}
293 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
294 }
295 
296 /*
297  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
298  * @htt_soc:	HTT SOC handle
299  * @dp_htt_htc_pkt: pkt to be added to list
300  */
301 static void
302 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
303 {
304 	struct dp_htt_htc_pkt_union *u_pkt =
305 				(struct dp_htt_htc_pkt_union *)pkt;
306 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
307 							pkt->htc_pkt.Endpoint)
308 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
309 
310 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
311 	if (soc->htt_htc_pkt_misclist) {
312 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
313 		soc->htt_htc_pkt_misclist = u_pkt;
314 	} else {
315 		soc->htt_htc_pkt_misclist = u_pkt;
316 	}
317 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
318 
319 	/* only ce pipe size + tx_queue_depth could possibly be in use
320 	 * free older packets in the misclist
321 	 */
322 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
323 }
324 
325 /*
326  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
327  * @htt_soc:	HTT SOC handle
328  */
329 static void
330 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
331 {
332 	struct dp_htt_htc_pkt_union *pkt, *next;
333 	qdf_nbuf_t netbuf;
334 
335 	pkt = soc->htt_htc_pkt_misclist;
336 
337 	while (pkt) {
338 		next = pkt->u.next;
339 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
340 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
341 
342 		soc->stats.htc_pkt_free++;
343 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
344 			 "%s: Pkt free count %d",
345 			 __func__, soc->stats.htc_pkt_free);
346 
347 		qdf_nbuf_free(netbuf);
348 		qdf_mem_free(pkt);
349 		pkt = next;
350 	}
351 	soc->htt_htc_pkt_misclist = NULL;
352 }
353 
354 /*
355  * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
356  * @tgt_mac_addr:	Target MAC
357  * @buffer:		Output buffer
358  */
359 static u_int8_t *
360 htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
361 {
362 #ifdef BIG_ENDIAN_HOST
363 	/*
364 	 * The host endianness is opposite of the target endianness.
365 	 * To make u_int32_t elements come out correctly, the target->host
366 	 * upload has swizzled the bytes in each u_int32_t element of the
367 	 * message.
368 	 * For byte-array message fields like the MAC address, this
369 	 * upload swizzling puts the bytes in the wrong order, and needs
370 	 * to be undone.
371 	 */
372 	buffer[0] = tgt_mac_addr[3];
373 	buffer[1] = tgt_mac_addr[2];
374 	buffer[2] = tgt_mac_addr[1];
375 	buffer[3] = tgt_mac_addr[0];
376 	buffer[4] = tgt_mac_addr[7];
377 	buffer[5] = tgt_mac_addr[6];
378 	return buffer;
379 #else
380 	/*
381 	 * The host endianness matches the target endianness -
382 	 * we can use the mac addr directly from the message buffer.
383 	 */
384 	return tgt_mac_addr;
385 #endif
386 }
387 
388 /*
389  * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
390  * @soc:	SOC handle
391  * @status:	Completion status
392  * @netbuf:	HTT buffer
393  */
394 static void
395 dp_htt_h2t_send_complete_free_netbuf(
396 	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
397 {
398 	qdf_nbuf_free(netbuf);
399 }
400 
401 /*
402  * dp_htt_h2t_send_complete() - H2T completion handler
403  * @context:	Opaque context (HTT SOC handle)
404  * @htc_pkt:	HTC packet
405  */
406 static void
407 dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
408 {
409 	void (*send_complete_part2)(
410 		void *soc, A_STATUS status, qdf_nbuf_t msdu);
411 	struct htt_soc *soc =  (struct htt_soc *) context;
412 	struct dp_htt_htc_pkt *htt_pkt;
413 	qdf_nbuf_t netbuf;
414 
415 	send_complete_part2 = htc_pkt->pPktContext;
416 
417 	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
418 
419 	/* process (free or keep) the netbuf that held the message */
420 	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
421 	/*
422 	 * adf sendcomplete is required for windows only
423 	 */
424 	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
425 	if (send_complete_part2 != NULL) {
426 		send_complete_part2(
427 			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
428 	}
429 	/* free the htt_htc_pkt / HTC_PACKET object */
430 	htt_htc_pkt_free(soc, htt_pkt);
431 }
432 
433 /*
434  * htt_h2t_ver_req_msg() - Send HTT version request message to target
435  * @htt_soc:	HTT SOC handle
436  *
437  * Return: 0 on success; error code on failure
438  */
439 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
440 {
441 	struct dp_htt_htc_pkt *pkt;
442 	qdf_nbuf_t msg;
443 	uint32_t *msg_word;
444 
445 	msg = qdf_nbuf_alloc(
446 		soc->osdev,
447 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
448 		/* reserve room for the HTC header */
449 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
450 	if (!msg)
451 		return QDF_STATUS_E_NOMEM;
452 
453 	/*
454 	 * Set the length of the message.
455 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
456 	 * separately during the below call to qdf_nbuf_push_head.
457 	 * The contribution from the HTC header is added separately inside HTC.
458 	 */
459 	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
460 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
461 			"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
462 			__func__);
463 		return QDF_STATUS_E_FAILURE;
464 	}
465 
466 	/* fill in the message contents */
467 	msg_word = (u_int32_t *) qdf_nbuf_data(msg);
468 
469 	/* rewind beyond alignment pad to get to the HTC header reserved area */
470 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
471 
472 	*msg_word = 0;
473 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
474 
475 	pkt = htt_htc_pkt_alloc(soc);
476 	if (!pkt) {
477 		qdf_nbuf_free(msg);
478 		return QDF_STATUS_E_FAILURE;
479 	}
480 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
481 
482 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
483 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
484 		qdf_nbuf_len(msg), soc->htc_endpoint,
485 		1); /* tag - not relevant here */
486 
487 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
488 	DP_HTT_SEND_HTC_PKT(soc, pkt);
489 	return 0;
490 }
491 
492 /*
493  * htt_srng_setup() - Send SRNG setup message to target
494  * @htt_soc:	HTT SOC handle
495  * @mac_id:	MAC Id
496  * @hal_srng:	Opaque HAL SRNG pointer
497  * @hal_ring_type:	SRNG ring type
498  *
499  * Return: 0 on success; error code on failure
500  */
501 int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,
502 	int hal_ring_type)
503 {
504 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
505 	struct dp_htt_htc_pkt *pkt;
506 	qdf_nbuf_t htt_msg;
507 	uint32_t *msg_word;
508 	struct hal_srng_params srng_params;
509 	qdf_dma_addr_t hp_addr, tp_addr;
510 	uint32_t ring_entry_size =
511 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
512 	int htt_ring_type, htt_ring_id;
513 
514 	/* Sizes should be set in 4-byte words */
515 	ring_entry_size = ring_entry_size >> 2;
516 
517 	htt_msg = qdf_nbuf_alloc(soc->osdev,
518 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
519 		/* reserve room for the HTC header */
520 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
521 	if (!htt_msg)
522 		goto fail0;
523 
524 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
525 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_srng);
526 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_srng);
527 
528 	switch (hal_ring_type) {
529 	case RXDMA_BUF:
530 #ifdef QCA_HOST2FW_RXBUF_RING
531 		if (srng_params.ring_id ==
532 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
533 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
534 			htt_ring_type = HTT_SW_TO_SW_RING;
535 #ifdef IPA_OFFLOAD
536 		} else if (srng_params.ring_id ==
537 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
538 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
539 			htt_ring_type = HTT_SW_TO_SW_RING;
540 #endif
541 #else
542 		if (srng_params.ring_id ==
543 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
544 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
545 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
546 			htt_ring_type = HTT_SW_TO_HW_RING;
547 #endif
548 		} else if (srng_params.ring_id ==
549 #ifdef IPA_OFFLOAD
550 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
551 #else
552 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
553 #endif
554 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
555 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
556 			htt_ring_type = HTT_SW_TO_HW_RING;
557 		} else {
558 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
559 				   "%s: Ring %d currently not supported",
560 				   __func__, srng_params.ring_id);
561 			goto fail1;
562 		}
563 
564 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
565 			  "%s: ring_type %d ring_id %d",
566 			  __func__, hal_ring_type, srng_params.ring_id);
567 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
568 			  "%s: hp_addr 0x%llx tp_addr 0x%llx",
569 			  __func__, (uint64_t)hp_addr, (uint64_t)tp_addr);
570 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
571 			  "%s: htt_ring_id %d", __func__, htt_ring_id);
572 		break;
573 	case RXDMA_MONITOR_BUF:
574 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
575 		htt_ring_type = HTT_SW_TO_HW_RING;
576 		break;
577 	case RXDMA_MONITOR_STATUS:
578 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
579 		htt_ring_type = HTT_SW_TO_HW_RING;
580 		break;
581 	case RXDMA_MONITOR_DST:
582 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
583 		htt_ring_type = HTT_HW_TO_SW_RING;
584 		break;
585 	case RXDMA_MONITOR_DESC:
586 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
587 		htt_ring_type = HTT_SW_TO_HW_RING;
588 		break;
589 	case RXDMA_DST:
590 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
591 		htt_ring_type = HTT_HW_TO_SW_RING;
592 		break;
593 
594 	default:
595 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
596 			"%s: Ring currently not supported", __func__);
597 			goto fail1;
598 	}
599 
600 	/*
601 	 * Set the length of the message.
602 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
603 	 * separately during the below call to qdf_nbuf_push_head.
604 	 * The contribution from the HTC header is added separately inside HTC.
605 	 */
606 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
607 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
608 			"%s: Failed to expand head for SRING_SETUP msg",
609 			__func__);
610 		return QDF_STATUS_E_FAILURE;
611 	}
612 
613 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
614 
615 	/* rewind beyond alignment pad to get to the HTC header reserved area */
616 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
617 
618 	/* word 0 */
619 	*msg_word = 0;
620 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
621 
622 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
623 			(htt_ring_type == HTT_HW_TO_SW_RING))
624 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word,
625 			 DP_SW2HW_MACID(mac_id));
626 	else
627 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
628 
629 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
630 		  "%s: mac_id %d", __func__, mac_id);
631 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
632 	/* TODO: Discuss with FW on changing this to unique ID and using
633 	 * htt_ring_type to send the type of ring
634 	 */
635 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
636 
637 	/* word 1 */
638 	msg_word++;
639 	*msg_word = 0;
640 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
641 		srng_params.ring_base_paddr & 0xffffffff);
642 
643 	/* word 2 */
644 	msg_word++;
645 	*msg_word = 0;
646 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
647 		(uint64_t)srng_params.ring_base_paddr >> 32);
648 
649 	/* word 3 */
650 	msg_word++;
651 	*msg_word = 0;
652 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
653 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
654 		(ring_entry_size * srng_params.num_entries));
655 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
656 		  "%s: entry_size %d", __func__,
657 			 ring_entry_size);
658 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
659 		  "%s: num_entries %d", __func__,
660 			 srng_params.num_entries);
661 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
662 		  "%s: ring_size %d", __func__,
663 			 (ring_entry_size * srng_params.num_entries));
664 	if (htt_ring_type == HTT_SW_TO_HW_RING)
665 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
666 						*msg_word, 1);
667 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
668 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
669 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
670 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
671 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
672 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
673 
674 	/* word 4 */
675 	msg_word++;
676 	*msg_word = 0;
677 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
678 		hp_addr & 0xffffffff);
679 
680 	/* word 5 */
681 	msg_word++;
682 	*msg_word = 0;
683 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
684 		(uint64_t)hp_addr >> 32);
685 
686 	/* word 6 */
687 	msg_word++;
688 	*msg_word = 0;
689 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
690 		tp_addr & 0xffffffff);
691 
692 	/* word 7 */
693 	msg_word++;
694 	*msg_word = 0;
695 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
696 		(uint64_t)tp_addr >> 32);
697 
698 	/* word 8 */
699 	msg_word++;
700 	*msg_word = 0;
701 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
702 		srng_params.msi_addr & 0xffffffff);
703 
704 	/* word 9 */
705 	msg_word++;
706 	*msg_word = 0;
707 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
708 		(uint64_t)(srng_params.msi_addr) >> 32);
709 
710 	/* word 10 */
711 	msg_word++;
712 	*msg_word = 0;
713 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
714 		srng_params.msi_data);
715 
716 	/* word 11 */
717 	msg_word++;
718 	*msg_word = 0;
719 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
720 		srng_params.intr_batch_cntr_thres_entries *
721 		ring_entry_size);
722 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
723 		srng_params.intr_timer_thres_us >> 3);
724 
725 	/* word 12 */
726 	msg_word++;
727 	*msg_word = 0;
728 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
729 		/* TODO: Setting low threshold to 1/8th of ring size - see
730 		 * if this needs to be configurable
731 		 */
732 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
733 			srng_params.low_threshold);
734 	}
735 	/* "response_required" field should be set if a HTT response message is
736 	 * required after setting up the ring.
737 	 */
738 	pkt = htt_htc_pkt_alloc(soc);
739 	if (!pkt)
740 		goto fail1;
741 
742 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
743 
744 	SET_HTC_PACKET_INFO_TX(
745 		&pkt->htc_pkt,
746 		dp_htt_h2t_send_complete_free_netbuf,
747 		qdf_nbuf_data(htt_msg),
748 		qdf_nbuf_len(htt_msg),
749 		soc->htc_endpoint,
750 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
751 
752 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
753 	DP_HTT_SEND_HTC_PKT(soc, pkt);
754 
755 	return QDF_STATUS_SUCCESS;
756 
757 fail1:
758 	qdf_nbuf_free(htt_msg);
759 fail0:
760 	return QDF_STATUS_E_FAILURE;
761 }
762 
763 /*
764  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
765  * config message to target
766  * @htt_soc:	HTT SOC handle
767  * @pdev_id:	PDEV Id
768  * @hal_srng:	Opaque HAL SRNG pointer
769  * @hal_ring_type:	SRNG ring type
770  * @ring_buf_size:	SRNG buffer size
771  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
772  * Return: 0 on success; error code on failure
773  */
774 int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng,
775 	int hal_ring_type, int ring_buf_size,
776 	struct htt_rx_ring_tlv_filter *htt_tlv_filter)
777 {
778 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
779 	struct dp_htt_htc_pkt *pkt;
780 	qdf_nbuf_t htt_msg;
781 	uint32_t *msg_word;
782 	struct hal_srng_params srng_params;
783 	uint32_t htt_ring_type, htt_ring_id;
784 	uint32_t tlv_filter;
785 
786 	htt_msg = qdf_nbuf_alloc(soc->osdev,
787 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
788 	/* reserve room for the HTC header */
789 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
790 	if (!htt_msg)
791 		goto fail0;
792 
793 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
794 
795 	switch (hal_ring_type) {
796 	case RXDMA_BUF:
797 #if QCA_HOST2FW_RXBUF_RING
798 		htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
799 		htt_ring_type = HTT_SW_TO_SW_RING;
800 #else
801 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
802 		htt_ring_type = HTT_SW_TO_HW_RING;
803 #endif
804 		break;
805 	case RXDMA_MONITOR_BUF:
806 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
807 		htt_ring_type = HTT_SW_TO_HW_RING;
808 		break;
809 	case RXDMA_MONITOR_STATUS:
810 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
811 		htt_ring_type = HTT_SW_TO_HW_RING;
812 		break;
813 	case RXDMA_MONITOR_DST:
814 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
815 		htt_ring_type = HTT_HW_TO_SW_RING;
816 		break;
817 	case RXDMA_MONITOR_DESC:
818 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
819 		htt_ring_type = HTT_SW_TO_HW_RING;
820 		break;
821 	case RXDMA_DST:
822 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
823 		htt_ring_type = HTT_HW_TO_SW_RING;
824 		break;
825 
826 	default:
827 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
828 			"%s: Ring currently not supported", __func__);
829 		goto fail1;
830 	}
831 
832 	/*
833 	 * Set the length of the message.
834 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
835 	 * separately during the below call to qdf_nbuf_push_head.
836 	 * The contribution from the HTC header is added separately inside HTC.
837 	 */
838 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
839 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
840 			"%s: Failed to expand head for RX Ring Cfg msg",
841 			__func__);
842 		goto fail1; /* failure */
843 	}
844 
845 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
846 
847 	/* rewind beyond alignment pad to get to the HTC header reserved area */
848 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
849 
850 	/* word 0 */
851 	*msg_word = 0;
852 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
853 
854 	/*
855 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
856 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
857 	 */
858 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
859 			htt_ring_type == HTT_SW_TO_HW_RING)
860 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
861 						DP_SW2HW_MACID(pdev_id));
862 
863 	/* TODO: Discuss with FW on changing this to unique ID and using
864 	 * htt_ring_type to send the type of ring
865 	 */
866 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
867 
868 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
869 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
870 
871 	HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
872 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
873 
874 	/* word 1 */
875 	msg_word++;
876 	*msg_word = 0;
877 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
878 		ring_buf_size);
879 
880 	/* word 2 */
881 	msg_word++;
882 	*msg_word = 0;
883 
884 	if (htt_tlv_filter->enable_fp) {
885 		/* TYPE: MGMT */
886 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
887 			FP, MGMT, 0000,
888 			(htt_tlv_filter->fp_mgmt_filter &
889 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
890 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
891 			FP, MGMT, 0001,
892 			(htt_tlv_filter->fp_mgmt_filter &
893 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
894 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
895 			FP, MGMT, 0010,
896 			(htt_tlv_filter->fp_mgmt_filter &
897 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
898 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
899 			FP, MGMT, 0011,
900 			(htt_tlv_filter->fp_mgmt_filter &
901 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
902 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
903 			FP, MGMT, 0100,
904 			(htt_tlv_filter->fp_mgmt_filter &
905 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
906 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
907 			FP, MGMT, 0101,
908 			(htt_tlv_filter->fp_mgmt_filter &
909 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
910 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
911 			FP, MGMT, 0110,
912 			(htt_tlv_filter->fp_mgmt_filter &
913 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
914 		/* reserved */
915 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
916 			MGMT, 0111,
917 			(htt_tlv_filter->fp_mgmt_filter &
918 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
919 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
920 			FP, MGMT, 1000,
921 			(htt_tlv_filter->fp_mgmt_filter &
922 			FILTER_MGMT_BEACON) ? 1 : 0);
923 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
924 			FP, MGMT, 1001,
925 			(htt_tlv_filter->fp_mgmt_filter &
926 			FILTER_MGMT_ATIM) ? 1 : 0);
927 	}
928 
929 	if (htt_tlv_filter->enable_md) {
930 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
931 				MGMT, 0000, 1);
932 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
933 				MGMT, 0001, 1);
934 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
935 				MGMT, 0010, 1);
936 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
937 				MGMT, 0011, 1);
938 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
939 				MGMT, 0100, 1);
940 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
941 				MGMT, 0101, 1);
942 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
943 				MGMT, 0110, 1);
944 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
945 				MGMT, 0111, 1);
946 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
947 				MGMT, 1000, 1);
948 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
949 				MGMT, 1001, 1);
950 	}
951 
952 	if (htt_tlv_filter->enable_mo) {
953 		/* TYPE: MGMT */
954 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
955 			MO, MGMT, 0000,
956 			(htt_tlv_filter->mo_mgmt_filter &
957 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
958 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
959 			MO, MGMT, 0001,
960 			(htt_tlv_filter->mo_mgmt_filter &
961 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
962 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
963 			MO, MGMT, 0010,
964 			(htt_tlv_filter->mo_mgmt_filter &
965 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
966 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
967 			MO, MGMT, 0011,
968 			(htt_tlv_filter->mo_mgmt_filter &
969 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
970 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
971 			MO, MGMT, 0100,
972 			(htt_tlv_filter->mo_mgmt_filter &
973 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
974 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
975 			MO, MGMT, 0101,
976 			(htt_tlv_filter->mo_mgmt_filter &
977 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
978 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
979 			MO, MGMT, 0110,
980 			(htt_tlv_filter->mo_mgmt_filter &
981 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
982 		/* reserved */
983 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
984 			MGMT, 0111,
985 			(htt_tlv_filter->mo_mgmt_filter &
986 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
987 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
988 			MO, MGMT, 1000,
989 			(htt_tlv_filter->mo_mgmt_filter &
990 			FILTER_MGMT_BEACON) ? 1 : 0);
991 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
992 			MO, MGMT, 1001,
993 			(htt_tlv_filter->mo_mgmt_filter &
994 			FILTER_MGMT_ATIM) ? 1 : 0);
995 	}
996 
997 	/* word 3 */
998 	msg_word++;
999 	*msg_word = 0;
1000 
1001 	if (htt_tlv_filter->enable_fp) {
1002 		/* TYPE: MGMT */
1003 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1004 			FP, MGMT, 1010,
1005 			(htt_tlv_filter->fp_mgmt_filter &
1006 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1007 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1008 			FP, MGMT, 1011,
1009 			(htt_tlv_filter->fp_mgmt_filter &
1010 			FILTER_MGMT_AUTH) ? 1 : 0);
1011 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1012 			FP, MGMT, 1100,
1013 			(htt_tlv_filter->fp_mgmt_filter &
1014 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1015 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1016 			FP, MGMT, 1101,
1017 			(htt_tlv_filter->fp_mgmt_filter &
1018 			FILTER_MGMT_ACTION) ? 1 : 0);
1019 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1020 			FP, MGMT, 1110,
1021 			(htt_tlv_filter->fp_mgmt_filter &
1022 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1023 		/* reserved*/
1024 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1025 			MGMT, 1111,
1026 			(htt_tlv_filter->fp_mgmt_filter &
1027 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1028 	}
1029 
1030 	if (htt_tlv_filter->enable_md) {
1031 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1032 				MGMT, 1010, 1);
1033 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1034 				MGMT, 1011, 1);
1035 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1036 				MGMT, 1100, 1);
1037 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1038 				MGMT, 1101, 1);
1039 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
1040 				MGMT, 1110, 1);
1041 	}
1042 
1043 	if (htt_tlv_filter->enable_mo) {
1044 		/* TYPE: MGMT */
1045 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1046 			MO, MGMT, 1010,
1047 			(htt_tlv_filter->mo_mgmt_filter &
1048 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1049 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1050 			MO, MGMT, 1011,
1051 			(htt_tlv_filter->mo_mgmt_filter &
1052 			FILTER_MGMT_AUTH) ? 1 : 0);
1053 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1054 			MO, MGMT, 1100,
1055 			(htt_tlv_filter->mo_mgmt_filter &
1056 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1057 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1058 			MO, MGMT, 1101,
1059 			(htt_tlv_filter->mo_mgmt_filter &
1060 			FILTER_MGMT_ACTION) ? 1 : 0);
1061 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1062 			MO, MGMT, 1110,
1063 			(htt_tlv_filter->mo_mgmt_filter &
1064 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1065 		/* reserved*/
1066 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1067 			MGMT, 1111,
1068 			(htt_tlv_filter->mo_mgmt_filter &
1069 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1070 	}
1071 
1072 	/* word 4 */
1073 	msg_word++;
1074 	*msg_word = 0;
1075 
1076 	if (htt_tlv_filter->enable_fp) {
1077 		/* TYPE: CTRL */
1078 		/* reserved */
1079 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1080 			CTRL, 0000,
1081 			(htt_tlv_filter->fp_ctrl_filter &
1082 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1083 		/* reserved */
1084 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1085 			CTRL, 0001,
1086 			(htt_tlv_filter->fp_ctrl_filter &
1087 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1088 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1089 			CTRL, 0010,
1090 			(htt_tlv_filter->fp_ctrl_filter &
1091 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1092 		/* reserved */
1093 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1094 			CTRL, 0011,
1095 			(htt_tlv_filter->fp_ctrl_filter &
1096 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1097 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1098 			CTRL, 0100,
1099 			(htt_tlv_filter->fp_ctrl_filter &
1100 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1101 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1102 			CTRL, 0101,
1103 			(htt_tlv_filter->fp_ctrl_filter &
1104 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1105 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1106 			CTRL, 0110,
1107 			(htt_tlv_filter->fp_ctrl_filter &
1108 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1109 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1110 			CTRL, 0111,
1111 			(htt_tlv_filter->fp_ctrl_filter &
1112 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1113 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1114 			CTRL, 1000,
1115 			(htt_tlv_filter->fp_ctrl_filter &
1116 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1117 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1118 			CTRL, 1001,
1119 			(htt_tlv_filter->fp_ctrl_filter &
1120 			FILTER_CTRL_BA) ? 1 : 0);
1121 	}
1122 
1123 	if (htt_tlv_filter->enable_md) {
1124 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1125 				CTRL, 0000, 1);
1126 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1127 				CTRL, 0001, 1);
1128 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1129 				CTRL, 0010, 1);
1130 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1131 				CTRL, 0011, 1);
1132 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1133 				CTRL, 0100, 1);
1134 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1135 				CTRL, 0101, 1);
1136 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1137 				CTRL, 0110, 1);
1138 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1139 				CTRL, 0111, 1);
1140 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1141 				CTRL, 1000, 1);
1142 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1143 				CTRL, 1001, 1);
1144 	}
1145 
1146 	if (htt_tlv_filter->enable_mo) {
1147 		/* TYPE: CTRL */
1148 		/* reserved */
1149 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1150 			CTRL, 0000,
1151 			(htt_tlv_filter->mo_ctrl_filter &
1152 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1153 		/* reserved */
1154 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1155 			CTRL, 0001,
1156 			(htt_tlv_filter->mo_ctrl_filter &
1157 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1158 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1159 			CTRL, 0010,
1160 			(htt_tlv_filter->mo_ctrl_filter &
1161 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1162 		/* reserved */
1163 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1164 			CTRL, 0011,
1165 			(htt_tlv_filter->mo_ctrl_filter &
1166 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1167 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1168 			CTRL, 0100,
1169 			(htt_tlv_filter->mo_ctrl_filter &
1170 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1171 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1172 			CTRL, 0101,
1173 			(htt_tlv_filter->mo_ctrl_filter &
1174 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1175 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1176 			CTRL, 0110,
1177 			(htt_tlv_filter->mo_ctrl_filter &
1178 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1179 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1180 			CTRL, 0111,
1181 			(htt_tlv_filter->mo_ctrl_filter &
1182 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1183 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1184 			CTRL, 1000,
1185 			(htt_tlv_filter->mo_ctrl_filter &
1186 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1187 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1188 			CTRL, 1001,
1189 			(htt_tlv_filter->mo_ctrl_filter &
1190 			FILTER_CTRL_BA) ? 1 : 0);
1191 	}
1192 
1193 	/* word 5 */
1194 	msg_word++;
1195 	*msg_word = 0;
1196 	if (htt_tlv_filter->enable_fp) {
1197 		/* TYPE: CTRL */
1198 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1199 			CTRL, 1010,
1200 			(htt_tlv_filter->fp_ctrl_filter &
1201 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1202 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1203 			CTRL, 1011,
1204 			(htt_tlv_filter->fp_ctrl_filter &
1205 			FILTER_CTRL_RTS) ? 1 : 0);
1206 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1207 			CTRL, 1100,
1208 			(htt_tlv_filter->fp_ctrl_filter &
1209 			FILTER_CTRL_CTS) ? 1 : 0);
1210 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1211 			CTRL, 1101,
1212 			(htt_tlv_filter->fp_ctrl_filter &
1213 			FILTER_CTRL_ACK) ? 1 : 0);
1214 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1215 			CTRL, 1110,
1216 			(htt_tlv_filter->fp_ctrl_filter &
1217 			FILTER_CTRL_CFEND) ? 1 : 0);
1218 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1219 			CTRL, 1111,
1220 			(htt_tlv_filter->fp_ctrl_filter &
1221 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1222 		/* TYPE: DATA */
1223 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1224 			DATA, MCAST,
1225 			(htt_tlv_filter->fp_data_filter &
1226 			FILTER_DATA_MCAST) ? 1 : 0);
1227 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1228 			DATA, UCAST,
1229 			(htt_tlv_filter->fp_data_filter &
1230 			FILTER_DATA_UCAST) ? 1 : 0);
1231 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1232 			DATA, NULL,
1233 			(htt_tlv_filter->fp_data_filter &
1234 			FILTER_DATA_NULL) ? 1 : 0);
1235 	}
1236 
1237 	if (htt_tlv_filter->enable_md) {
1238 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1239 				CTRL, 1010, 1);
1240 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1241 				CTRL, 1011, 1);
1242 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1243 				CTRL, 1100, 1);
1244 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1245 				CTRL, 1101, 1);
1246 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1247 				CTRL, 1110, 1);
1248 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1249 				CTRL, 1111, 1);
1250 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1251 				DATA, MCAST, 1);
1252 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1253 				DATA, UCAST, 1);
1254 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1255 				DATA, NULL, 1);
1256 	}
1257 
1258 	if (htt_tlv_filter->enable_mo) {
1259 		/* TYPE: CTRL */
1260 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1261 			CTRL, 1010,
1262 			(htt_tlv_filter->mo_ctrl_filter &
1263 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1264 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1265 			CTRL, 1011,
1266 			(htt_tlv_filter->mo_ctrl_filter &
1267 			FILTER_CTRL_RTS) ? 1 : 0);
1268 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1269 			CTRL, 1100,
1270 			(htt_tlv_filter->mo_ctrl_filter &
1271 			FILTER_CTRL_CTS) ? 1 : 0);
1272 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1273 			CTRL, 1101,
1274 			(htt_tlv_filter->mo_ctrl_filter &
1275 			FILTER_CTRL_ACK) ? 1 : 0);
1276 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1277 			CTRL, 1110,
1278 			(htt_tlv_filter->mo_ctrl_filter &
1279 			FILTER_CTRL_CFEND) ? 1 : 0);
1280 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1281 			CTRL, 1111,
1282 			(htt_tlv_filter->mo_ctrl_filter &
1283 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1284 		/* TYPE: DATA */
1285 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1286 			DATA, MCAST,
1287 			(htt_tlv_filter->mo_data_filter &
1288 			FILTER_DATA_MCAST) ? 1 : 0);
1289 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1290 			DATA, UCAST,
1291 			(htt_tlv_filter->mo_data_filter &
1292 			FILTER_DATA_UCAST) ? 1 : 0);
1293 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1294 			DATA, NULL,
1295 			(htt_tlv_filter->mo_data_filter &
1296 			FILTER_DATA_NULL) ? 1 : 0);
1297 	}
1298 
1299 	/* word 6 */
1300 	msg_word++;
1301 	*msg_word = 0;
1302 	tlv_filter = 0;
1303 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1304 		htt_tlv_filter->mpdu_start);
1305 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1306 		htt_tlv_filter->msdu_start);
1307 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1308 		htt_tlv_filter->packet);
1309 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1310 		htt_tlv_filter->msdu_end);
1311 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1312 		htt_tlv_filter->mpdu_end);
1313 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1314 		htt_tlv_filter->packet_header);
1315 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1316 		htt_tlv_filter->attention);
1317 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1318 		htt_tlv_filter->ppdu_start);
1319 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1320 		htt_tlv_filter->ppdu_end);
1321 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1322 		htt_tlv_filter->ppdu_end_user_stats);
1323 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1324 		PPDU_END_USER_STATS_EXT,
1325 		htt_tlv_filter->ppdu_end_user_stats_ext);
1326 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1327 		htt_tlv_filter->ppdu_end_status_done);
1328 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1329 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1330 		 htt_tlv_filter->header_per_msdu);
1331 
1332 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1333 
1334 	/* "response_required" field should be set if a HTT response message is
1335 	 * required after setting up the ring.
1336 	 */
1337 	pkt = htt_htc_pkt_alloc(soc);
1338 	if (!pkt)
1339 		goto fail1;
1340 
1341 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1342 
1343 	SET_HTC_PACKET_INFO_TX(
1344 		&pkt->htc_pkt,
1345 		dp_htt_h2t_send_complete_free_netbuf,
1346 		qdf_nbuf_data(htt_msg),
1347 		qdf_nbuf_len(htt_msg),
1348 		soc->htc_endpoint,
1349 		1); /* tag - not relevant here */
1350 
1351 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1352 	DP_HTT_SEND_HTC_PKT(soc, pkt);
1353 	return QDF_STATUS_SUCCESS;
1354 
1355 fail1:
1356 	qdf_nbuf_free(htt_msg);
1357 fail0:
1358 	return QDF_STATUS_E_FAILURE;
1359 }
1360 
1361 #if defined(CONFIG_WIN) && WDI_EVENT_ENABLE
1362 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1363 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1364 
1365 {
1366 	uint32_t pdev_id;
1367 	uint32_t *msg_word = NULL;
1368 	uint32_t msg_remain_len = 0;
1369 
1370 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1371 
1372 	/*COOKIE MSB*/
1373 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1374 
1375 	/* stats message length + 16 size of HTT header*/
1376 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1377 				(uint32_t)DP_EXT_MSG_LENGTH);
1378 
1379 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1380 			msg_word,  msg_remain_len,
1381 			WDI_NO_VAL, pdev_id);
1382 
1383 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1384 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1385 	}
1386 	/* Need to be freed here as WDI handler will
1387 	 * make a copy of pkt to send data to application
1388 	 */
1389 	qdf_nbuf_free(htt_msg);
1390 	return QDF_STATUS_SUCCESS;
1391 }
1392 #else
1393 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1394 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1395 {
1396 	return QDF_STATUS_E_NOSUPPORT;
1397 }
1398 #endif
1399 
1400 /**
1401  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1402  * @htt_stats: htt stats info
1403  *
1404  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1405  * contains sub messages which are identified by a TLV header.
1406  * In this function we will process the stream of T2H messages and read all the
1407  * TLV contained in the message.
1408  *
1409  * THe following cases have been taken care of
1410  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1411  *		In this case the buffer will contain multiple tlvs.
1412  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1413  *		Only one tlv will be contained in the HTT message and this tag
1414  *		will extend onto the next buffer.
1415  * Case 3: When the buffer is the continuation of the previous message
1416  * Case 4: tlv length is 0. which will indicate the end of message
1417  *
1418  * return: void
1419  */
static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
					struct dp_soc *soc)
{
	htt_tlv_tag_t tlv_type = 0xff;
	qdf_nbuf_t htt_msg = NULL;
	uint32_t *msg_word;
	/* Reassembly buffer for a TLV that spans multiple T2H buffers
	 * (Case 2); NULL when not mid-TLV.
	 */
	uint8_t *tlv_buf_head = NULL;
	uint8_t *tlv_buf_tail = NULL;
	uint32_t msg_remain_len = 0;
	uint32_t tlv_remain_len = 0;
	uint32_t *tlv_start;
	int cookie_val;
	int cookie_msb;
	int pdev_id;
	bool copy_stats = false;
	struct dp_pdev *pdev;

	/* Process node in the HTT message queue */
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
		!= NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		/* Non-zero cookie LSB: buffer must also be forwarded to the
		 * requester; dp_send_htt_stat_resp() frees it on success.
		 */
		cookie_val = *(msg_word + 1);
		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
					*(msg_word +
					HTT_T2H_EXT_STATS_TLV_START_OFFSET));

		if (cookie_val) {
			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
					== QDF_STATUS_SUCCESS) {
				continue;
			}
		}

		cookie_msb = *(msg_word + 2);
		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
		pdev = soc->pdev_list[pdev_id];

		/* Bits above the pdev id in the cookie MSB select copying
		 * the stats into the pdev instead of printing them.
		 */
		if (cookie_msb >> 2) {
			copy_stats = true;
		}

		/* read 5th word */
		msg_word = msg_word + 4;
		msg_remain_len = qdf_min(htt_stats->msg_len,
				(uint32_t) DP_EXT_MSG_LENGTH);
		/* Keep processing the node till node length is 0 */
		while (msg_remain_len) {
			/*
			 * if message is not a continuation of previous message
			 * read the tlv type and tlv length
			 */
			if (!tlv_buf_head) {
				tlv_type = HTT_STATS_TLV_TAG_GET(
						*msg_word);
				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
						*msg_word);
			}

			/* Case 4: zero TLV length marks end of message */
			if (tlv_remain_len == 0) {
				msg_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

				goto error;
			}

			/* TLV length field excludes its own header */
			if (!tlv_buf_head)
				tlv_remain_len += HTT_TLV_HDR_LEN;

			if ((tlv_remain_len <= msg_remain_len)) {
				/* Case 3 */
				if (tlv_buf_head) {
					qdf_mem_copy(tlv_buf_tail,
							(uint8_t *)msg_word,
							tlv_remain_len);
					tlv_start = (uint32_t *)tlv_buf_head;
				} else {
					/* Case 1 */
					tlv_start = msg_word;
				}

				if (copy_stats)
					dp_htt_stats_copy_tag(pdev, tlv_type, tlv_start);
				else
					dp_htt_stats_print_tag(tlv_type, tlv_start);

				msg_remain_len -= tlv_remain_len;

				msg_word = (uint32_t *)
					(((uint8_t *)msg_word) +
					tlv_remain_len);

				tlv_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

			} else { /* tlv_remain_len > msg_remain_len */
				/* Case 2 & 3 */
				if (!tlv_buf_head) {
					tlv_buf_head = qdf_mem_malloc(
							tlv_remain_len);

					if (!tlv_buf_head) {
						QDF_TRACE(QDF_MODULE_ID_TXRX,
								QDF_TRACE_LEVEL_ERROR,
								"Alloc failed");
						goto error;
					}

					tlv_buf_tail = tlv_buf_head;
				}

				/* Stash the partial TLV; the remainder
				 * arrives in the next buffer of the stream.
				 */
				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
						msg_remain_len);
				tlv_remain_len -= msg_remain_len;
				tlv_buf_tail += msg_remain_len;
			}
		}

		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
		}

		qdf_nbuf_free(htt_msg);
	}
	return;

error:
	/* On malformed input drop the current buffer and drain the queue */
	qdf_nbuf_free(htt_msg);
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
			!= NULL)
		qdf_nbuf_free(htt_msg);
}
1561 
1562 void htt_t2h_stats_handler(void *context)
1563 {
1564 	struct dp_soc *soc = (struct dp_soc *)context;
1565 	struct htt_stats_context htt_stats;
1566 	uint32_t *msg_word;
1567 	qdf_nbuf_t htt_msg = NULL;
1568 	uint8_t done;
1569 	uint8_t rem_stats;
1570 
1571 	if (!soc || !qdf_atomic_read(&soc->cmn_init_done)) {
1572 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1573 			"soc: 0x%pK, init_done: %d", soc,
1574 			qdf_atomic_read(&soc->cmn_init_done));
1575 		return;
1576 	}
1577 
1578 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
1579 	qdf_nbuf_queue_init(&htt_stats.msg);
1580 
1581 	/* pull one completed stats from soc->htt_stats_msg and process */
1582 	qdf_spin_lock_bh(&soc->htt_stats.lock);
1583 	if (!soc->htt_stats.num_stats) {
1584 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
1585 		return;
1586 	}
1587 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
1588 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1589 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
1590 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
1591 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
1592 		/*
1593 		 * Done bit signifies that this is the last T2H buffer in the
1594 		 * stream of HTT EXT STATS message
1595 		 */
1596 		if (done)
1597 			break;
1598 	}
1599 	rem_stats = --soc->htt_stats.num_stats;
1600 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
1601 
1602 	dp_process_htt_stat_msg(&htt_stats, soc);
1603 	/* If there are more stats to process, schedule stats work again */
1604 	if (rem_stats)
1605 		qdf_sched_work(0, &soc->htt_stats.work);
1606 }
1607 
1608 /*
1609  * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU,
1610  * if a new peer id arrives in a PPDU
1611  * pdev: DP pdev handle
1612  * @peer_id : peer unique identifier
1613  * @ppdu_info: per ppdu tlv structure
1614  *
1615  * return:user index to be populated
1616  */
1617 #ifdef FEATURE_PERPKT_INFO
1618 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
1619 						uint16_t peer_id,
1620 						struct ppdu_info *ppdu_info)
1621 {
1622 	uint8_t user_index = 0;
1623 	struct cdp_tx_completion_ppdu *ppdu_desc;
1624 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1625 
1626 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1627 
1628 	while ((user_index + 1) <= ppdu_info->last_user) {
1629 		ppdu_user_desc = &ppdu_desc->user[user_index];
1630 		if (ppdu_user_desc->peer_id != peer_id) {
1631 			user_index++;
1632 			continue;
1633 		} else {
1634 			/* Max users possible is 8 so user array index should
1635 			 * not exceed 7
1636 			 */
1637 			qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX);
1638 			return user_index;
1639 		}
1640 	}
1641 
1642 	ppdu_info->last_user++;
1643 	/* Max users possible is 8 so last user should not exceed 8 */
1644 	qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS);
1645 	return ppdu_info->last_user - 1;
1646 }
1647 
1648 /*
1649  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
1650  * pdev: DP pdev handle
1651  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
1652  * @ppdu_info: per ppdu tlv structure
1653  *
1654  * return:void
1655  */
1656 static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
1657 		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
1658 {
1659 	uint16_t frame_type;
1660 	uint16_t freq;
1661 	struct dp_soc *soc = NULL;
1662 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
1663 
1664 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1665 
1666 	tag_buf += 2;
1667 	ppdu_desc->num_users =
1668 		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
1669 	tag_buf++;
1670 	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
1671 
1672 	if ((frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) ||
1673 			(frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU))
1674 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
1675 	else if ((frame_type == HTT_STATS_FTYPE_SGEN_MU_BAR) ||
1676 		 (frame_type == HTT_STATS_FTYPE_SGEN_BAR))
1677 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
1678 	else
1679 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
1680 
1681 	tag_buf += 2;
1682 	ppdu_desc->tx_duration = *tag_buf;
1683 	tag_buf += 3;
1684 	ppdu_desc->ppdu_start_timestamp = *tag_buf;
1685 
1686 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
1687 					ppdu_desc->tx_duration;
1688 	/* Ack time stamp is same as end time stamp*/
1689 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
1690 
1691 	tag_buf++;
1692 
1693 	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
1694 	if (freq != ppdu_desc->channel) {
1695 		soc = pdev->soc;
1696 		ppdu_desc->channel = freq;
1697 		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
1698 			pdev->operating_channel =
1699 		soc->cdp_soc.ol_ops->freq_to_channel(pdev->ctrl_pdev, freq);
1700 	}
1701 
1702 	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
1703 }
1704 
1705 /*
1706  * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
1707  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
1708  * @ppdu_info: per ppdu tlv structure
1709  *
1710  * return:void
1711  */
static void dp_process_ppdu_stats_user_common_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct dp_peer *peer;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	/* NOTE(review): peer id is read with the USER_RATE_TLV accessor even
	 * though this handler parses the user_common TLV — confirm the two
	 * TLV layouts share the sw_peer_id field position.
	 */
	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index =
		dp_get_ppdu_info_user_index(pdev,
					    peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];

	if (peer_id == DP_SCAN_PEER_ID) {
		/* scan-frame completions carry the vap id instead of a peer */
		ppdu_desc->vdev_id =
			HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
	} else {
		/* drop the TLV if the peer no longer exists */
		peer = dp_peer_find_by_id(pdev->soc, peer_id);
		if (!peer)
			return;
	}

	ppdu_user_desc->peer_id = peer_id;

	tag_buf++;

	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
		ppdu_user_desc->delayed_ba = 1;
	}

	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
		ppdu_user_desc->is_mcast = true;
		ppdu_user_desc->mpdu_tried_mcast =
		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
		/* multicast has no retries; tried count equals mpdu count */
		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
	} else {
		ppdu_user_desc->mpdu_tried_ucast =
		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
	}

	tag_buf++;

	ppdu_user_desc->qos_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
	ppdu_user_desc->frame_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;

	/* with delayed BA the mpdu counts arrive in a later TLV; clear any
	 * values populated above
	 */
	if (ppdu_user_desc->delayed_ba) {
		ppdu_user_desc->mpdu_success = 0;
		ppdu_user_desc->mpdu_tried_mcast = 0;
		ppdu_user_desc->mpdu_tried_ucast = 0;
	}
}
1774 
1775 
1776 /**
1777  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
1778  * @pdev: DP pdev handle
1779  * @tag_buf: T2H message buffer carrying the user rate TLV
1780  * @ppdu_info: per ppdu tlv structure
1781  *
1782  * return:void
1783  */
1784 static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
1785 		uint32_t *tag_buf,
1786 		struct ppdu_info *ppdu_info)
1787 {
1788 	uint16_t peer_id;
1789 	struct dp_peer *peer;
1790 	struct cdp_tx_completion_ppdu *ppdu_desc;
1791 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1792 	uint8_t curr_user_index = 0;
1793 	struct dp_vdev *vdev;
1794 
1795 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1796 
1797 	tag_buf++;
1798 	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
1799 
1800 	curr_user_index =
1801 		dp_get_ppdu_info_user_index(pdev,
1802 					    peer_id, ppdu_info);
1803 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1804 
1805 	if (peer_id == DP_SCAN_PEER_ID) {
1806 		vdev =
1807 		       dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
1808 							  ppdu_desc->vdev_id);
1809 		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
1810 			     DP_MAC_ADDR_LEN);
1811 	} else {
1812 		peer = dp_peer_find_by_id(pdev->soc, peer_id);
1813 		if (!peer)
1814 			return;
1815 		qdf_mem_copy(ppdu_user_desc->mac_addr,
1816 			     peer->mac_addr.raw, DP_MAC_ADDR_LEN);
1817 	}
1818 
1819 	ppdu_user_desc->peer_id = peer_id;
1820 
1821 	ppdu_user_desc->tid =
1822 		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
1823 
1824 	tag_buf += 2;
1825 
1826 	ppdu_user_desc->ru_tones =
1827 		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
1828 		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
1829 
1830 	tag_buf += 2;
1831 
1832 	ppdu_user_desc->ppdu_type =
1833 		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
1834 
1835 	tag_buf++;
1836 	ppdu_user_desc->tx_rate = *tag_buf;
1837 
1838 	ppdu_user_desc->ltf_size =
1839 		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
1840 	ppdu_user_desc->stbc =
1841 		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
1842 	ppdu_user_desc->he_re =
1843 		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
1844 	ppdu_user_desc->txbf =
1845 		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
1846 	ppdu_user_desc->bw =
1847 		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
1848 	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
1849 	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
1850 	ppdu_user_desc->preamble =
1851 		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
1852 	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
1853 	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
1854 	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
1855 }
1856 
1857 /*
1858  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
1859  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
1860  * pdev: DP PDEV handle
1861  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
1862  * @ppdu_info: per ppdu tlv structure
1863  *
1864  * return:void
1865  */
1866 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
1867 		struct dp_pdev *pdev, uint32_t *tag_buf,
1868 		struct ppdu_info *ppdu_info)
1869 {
1870 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
1871 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
1872 
1873 	struct cdp_tx_completion_ppdu *ppdu_desc;
1874 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1875 	uint8_t curr_user_index = 0;
1876 	uint16_t peer_id;
1877 	struct dp_peer *peer;
1878 
1879 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1880 
1881 	tag_buf++;
1882 
1883 	peer_id =
1884 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
1885 
1886 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1887 
1888 	if (!peer)
1889 		return;
1890 
1891 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1892 
1893 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1894 	ppdu_user_desc->peer_id = peer_id;
1895 
1896 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
1897 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
1898 					CDP_BA_64_BIT_MAP_SIZE_DWORDS);
1899 }
1900 
1901 /*
1902  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
1903  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
1904  * soc: DP SOC handle
1905  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
1906  * @ppdu_info: per ppdu tlv structure
1907  *
1908  * return:void
1909  */
1910 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
1911 		struct dp_pdev *pdev, uint32_t *tag_buf,
1912 		struct ppdu_info *ppdu_info)
1913 {
1914 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
1915 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
1916 
1917 	struct cdp_tx_completion_ppdu *ppdu_desc;
1918 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1919 	uint8_t curr_user_index = 0;
1920 	uint16_t peer_id;
1921 	struct dp_peer *peer;
1922 
1923 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1924 
1925 	tag_buf++;
1926 
1927 	peer_id =
1928 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
1929 
1930 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1931 
1932 	if (!peer)
1933 		return;
1934 
1935 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1936 
1937 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1938 	ppdu_user_desc->peer_id = peer_id;
1939 
1940 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
1941 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
1942 					CDP_BA_256_BIT_MAP_SIZE_DWORDS);
1943 }
1944 
1945 /*
1946  * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
1947  * htt_ppdu_stats_user_cmpltn_common_tlv
1948  * soc: DP SOC handle
1949  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
1950  * @ppdu_info: per ppdu tlv structure
1951  *
1952  * return:void
1953  */
1954 static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
1955 		struct dp_pdev *pdev, uint32_t *tag_buf,
1956 		struct ppdu_info *ppdu_info)
1957 {
1958 	uint16_t peer_id;
1959 	struct dp_peer *peer;
1960 	struct cdp_tx_completion_ppdu *ppdu_desc;
1961 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1962 	uint8_t curr_user_index = 0;
1963 	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
1964 		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
1965 
1966 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1967 
1968 	tag_buf++;
1969 	peer_id =
1970 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
1971 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1972 
1973 	if (!peer)
1974 		return;
1975 
1976 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
1977 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1978 	ppdu_user_desc->peer_id = peer_id;
1979 
1980 	ppdu_user_desc->completion_status =
1981 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
1982 				*tag_buf);
1983 
1984 	ppdu_user_desc->tid =
1985 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
1986 
1987 
1988 	tag_buf++;
1989 	ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
1990 
1991 	tag_buf++;
1992 
1993 	ppdu_user_desc->mpdu_success =
1994 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
1995 
1996 	tag_buf++;
1997 
1998 	ppdu_user_desc->long_retries =
1999 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
2000 
2001 	ppdu_user_desc->short_retries =
2002 	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
2003 	ppdu_user_desc->retry_msdus =
2004 		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
2005 
2006 	ppdu_user_desc->is_ampdu =
2007 		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
2008 	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
2009 
2010 }
2011 
2012 /*
2013  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
2014  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2015  * pdev: DP PDEV handle
2016  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2017  * @ppdu_info: per ppdu tlv structure
2018  *
2019  * return:void
2020  */
2021 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2022 		struct dp_pdev *pdev, uint32_t *tag_buf,
2023 		struct ppdu_info *ppdu_info)
2024 {
2025 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2026 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2027 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2028 	struct cdp_tx_completion_ppdu *ppdu_desc;
2029 	uint8_t curr_user_index = 0;
2030 	uint16_t peer_id;
2031 	struct dp_peer *peer;
2032 
2033 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2034 
2035 	tag_buf++;
2036 
2037 	peer_id =
2038 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2039 
2040 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2041 
2042 	if (!peer)
2043 		return;
2044 
2045 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2046 
2047 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2048 	ppdu_user_desc->peer_id = peer_id;
2049 
2050 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2051 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2052 			CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2053 }
2054 
2055 /*
2056  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
2057  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2058  * pdev: DP PDEV handle
2059  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2060  * @ppdu_info: per ppdu tlv structure
2061  *
2062  * return:void
2063  */
2064 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2065 		struct dp_pdev *pdev, uint32_t *tag_buf,
2066 		struct ppdu_info *ppdu_info)
2067 {
2068 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2069 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2070 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2071 	struct cdp_tx_completion_ppdu *ppdu_desc;
2072 	uint8_t curr_user_index = 0;
2073 	uint16_t peer_id;
2074 	struct dp_peer *peer;
2075 
2076 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2077 
2078 	tag_buf++;
2079 
2080 	peer_id =
2081 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2082 
2083 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2084 
2085 	if (!peer)
2086 		return;
2087 
2088 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2089 
2090 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2091 	ppdu_user_desc->peer_id = peer_id;
2092 
2093 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2094 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2095 			CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2096 }
2097 
2098 /*
2099  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
2100  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2101  * pdev: DP PDE handle
2102  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2103  * @ppdu_info: per ppdu tlv structure
2104  *
2105  * return:void
2106  */
2107 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2108 		struct dp_pdev *pdev, uint32_t *tag_buf,
2109 		struct ppdu_info *ppdu_info)
2110 {
2111 	uint16_t peer_id;
2112 	struct dp_peer *peer;
2113 	struct cdp_tx_completion_ppdu *ppdu_desc;
2114 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2115 	uint8_t curr_user_index = 0;
2116 
2117 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2118 
2119 	tag_buf += 2;
2120 	peer_id =
2121 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
2122 
2123 
2124 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2125 
2126 	if (!peer)
2127 		return;
2128 
2129 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2130 
2131 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2132 	ppdu_user_desc->peer_id = peer_id;
2133 
2134 	tag_buf++;
2135 	ppdu_user_desc->tid =
2136 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_GET(*tag_buf);
2137 	ppdu_user_desc->num_mpdu =
2138 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
2139 
2140 	ppdu_user_desc->num_msdu =
2141 		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
2142 
2143 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
2144 
2145 	tag_buf += 2;
2146 	ppdu_user_desc->success_bytes = *tag_buf;
2147 
2148 }
2149 
2150 /*
2151  * dp_process_ppdu_stats_user_common_array_tlv: Process
2152  * htt_ppdu_stats_user_common_array_tlv
2153  * pdev: DP PDEV handle
2154  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2155  * @ppdu_info: per ppdu tlv structure
2156  *
2157  * return:void
2158  */
2159 static void dp_process_ppdu_stats_user_common_array_tlv(
2160 		struct dp_pdev *pdev, uint32_t *tag_buf,
2161 		struct ppdu_info *ppdu_info)
2162 {
2163 	uint32_t peer_id;
2164 	struct dp_peer *peer;
2165 	struct cdp_tx_completion_ppdu *ppdu_desc;
2166 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2167 	uint8_t curr_user_index = 0;
2168 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2169 
2170 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2171 
2172 	tag_buf++;
2173 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2174 	tag_buf += 3;
2175 	peer_id =
2176 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2177 
2178 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2179 
2180 	if (!peer) {
2181 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2182 			"Invalid peer");
2183 		return;
2184 	}
2185 
2186 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2187 
2188 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2189 
2190 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2191 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2192 
2193 	tag_buf++;
2194 
2195 	ppdu_user_desc->success_msdus =
2196 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2197 	ppdu_user_desc->retry_bytes =
2198 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2199 	tag_buf++;
2200 	ppdu_user_desc->failed_msdus =
2201 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
2202 }
2203 
2204 /*
2205  * dp_process_ppdu_stats_flush_tlv: Process
2206  * htt_ppdu_stats_flush_tlv
2207  * @pdev: DP PDEV handle
2208  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
2209  *
2210  * return:void
2211  */
2212 static void dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
2213 						uint32_t *tag_buf)
2214 {
2215 	uint32_t peer_id;
2216 	uint32_t drop_reason;
2217 	uint8_t tid;
2218 	uint32_t num_msdu;
2219 	struct dp_peer *peer;
2220 
2221 	tag_buf++;
2222 	drop_reason = *tag_buf;
2223 
2224 	tag_buf++;
2225 	num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
2226 
2227 	tag_buf++;
2228 	peer_id =
2229 		HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
2230 
2231 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2232 	if (!peer)
2233 		return;
2234 
2235 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
2236 
2237 	if (drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
2238 		DP_STATS_INC(peer, tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
2239 					num_msdu);
2240 	}
2241 }
2242 
2243 /*
2244  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
2245  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2246  * @pdev: DP PDEV handle
2247  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2248  * @length: tlv_length
2249  *
2250  * return:QDF_STATUS_SUCCESS if nbuf as to be freed in caller
2251  */
2252 static QDF_STATUS
2253 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
2254 					      qdf_nbuf_t tag_buf,
2255 					      uint32_t ppdu_id)
2256 {
2257 	uint32_t *nbuf_ptr;
2258 	uint8_t trim_size;
2259 
2260 	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
2261 	    (!pdev->bpr_enable))
2262 		return QDF_STATUS_SUCCESS;
2263 
2264 	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
2265 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
2266 		      qdf_nbuf_data(tag_buf));
2267 
2268 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
2269 		return QDF_STATUS_SUCCESS;
2270 
2271 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
2272 			    pdev->mgmtctrl_frm_info.mgmt_buf_len);
2273 
2274 	nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(
2275 				tag_buf, sizeof(ppdu_id));
2276 	*nbuf_ptr = ppdu_id;
2277 
2278 	if (pdev->bpr_enable) {
2279 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
2280 				     tag_buf, HTT_INVALID_PEER,
2281 				     WDI_NO_VAL, pdev->pdev_id);
2282 	}
2283 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2284 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
2285 				     tag_buf, HTT_INVALID_PEER,
2286 				     WDI_NO_VAL, pdev->pdev_id);
2287 	}
2288 
2289 	return QDF_STATUS_E_ALREADY;
2290 }
2291 
2292 /**
2293  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
2294  * @pdev: DP pdev handle
2295  * @tag_buf: TLV buffer
2296  * @tlv_len: length of tlv
2297  * @ppdu_info: per ppdu tlv structure
2298  *
2299  * return: void
2300  */
2301 static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
2302 		uint32_t tlv_len, struct ppdu_info *ppdu_info)
2303 {
2304 	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
2305 
2306 	switch (tlv_type) {
2307 	case HTT_PPDU_STATS_COMMON_TLV:
2308 		qdf_assert_always(tlv_len ==
2309 				sizeof(htt_ppdu_stats_common_tlv));
2310 		dp_process_ppdu_stats_common_tlv(pdev, tag_buf, ppdu_info);
2311 		break;
2312 	case HTT_PPDU_STATS_USR_COMMON_TLV:
2313 		qdf_assert_always(tlv_len ==
2314 				sizeof(htt_ppdu_stats_user_common_tlv));
2315 		dp_process_ppdu_stats_user_common_tlv(
2316 				pdev, tag_buf, ppdu_info);
2317 		break;
2318 	case HTT_PPDU_STATS_USR_RATE_TLV:
2319 		qdf_assert_always(tlv_len ==
2320 				sizeof(htt_ppdu_stats_user_rate_tlv));
2321 		dp_process_ppdu_stats_user_rate_tlv(pdev, tag_buf, ppdu_info);
2322 		break;
2323 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
2324 		qdf_assert_always(tlv_len ==
2325 				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv));
2326 		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2327 				pdev, tag_buf, ppdu_info);
2328 		break;
2329 	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
2330 		qdf_assert_always(tlv_len ==
2331 				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv));
2332 		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2333 				pdev, tag_buf, ppdu_info);
2334 		break;
2335 	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
2336 		qdf_assert_always(tlv_len ==
2337 				sizeof(htt_ppdu_stats_user_cmpltn_common_tlv));
2338 		dp_process_ppdu_stats_user_cmpltn_common_tlv(
2339 				pdev, tag_buf, ppdu_info);
2340 		break;
2341 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
2342 		qdf_assert_always(tlv_len ==
2343 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv));
2344 		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2345 				pdev, tag_buf, ppdu_info);
2346 		break;
2347 	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
2348 		qdf_assert_always(tlv_len ==
2349 			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv));
2350 		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2351 				pdev, tag_buf, ppdu_info);
2352 		break;
2353 	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
2354 		qdf_assert_always(tlv_len ==
2355 			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv));
2356 		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
2357 				pdev, tag_buf, ppdu_info);
2358 		break;
2359 	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
2360 		qdf_assert_always(tlv_len ==
2361 			sizeof(htt_ppdu_stats_usr_common_array_tlv_v));
2362 		dp_process_ppdu_stats_user_common_array_tlv(
2363 				pdev, tag_buf, ppdu_info);
2364 		break;
2365 	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
2366 		qdf_assert_always(tlv_len ==
2367 			sizeof(htt_ppdu_stats_flush_tlv));
2368 		dp_process_ppdu_stats_user_compltn_flush_tlv(
2369 				pdev, tag_buf);
2370 		break;
2371 	default:
2372 		break;
2373 	}
2374 }
2375 
2376 /**
2377  * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
2378  * to upper layer
2379  * @pdev: DP pdev handle
2380  * @ppdu_info: per PPDU TLV descriptor
2381  *
2382  * return: void
2383  */
2384 static
2385 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
2386 			      struct ppdu_info *ppdu_info)
2387 {
2388 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2389 	struct dp_peer *peer = NULL;
2390 	qdf_nbuf_t nbuf;
2391 	uint16_t i;
2392 
2393 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2394 		qdf_nbuf_data(ppdu_info->nbuf);
2395 
2396 	ppdu_desc->num_users = ppdu_info->last_user;
2397 	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
2398 
2399 	for (i = 0; i < ppdu_desc->num_users; i++) {
2400 
2401 
2402 		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
2403 		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
2404 
2405 		if (ppdu_desc->user[i].tid < CDP_DATA_TID_MAX) {
2406 			peer = dp_peer_find_by_id(pdev->soc,
2407 					ppdu_desc->user[i].peer_id);
2408 			/**
2409 			 * This check is to make sure peer is not deleted
2410 			 * after processing the TLVs.
2411 			 */
2412 			if (!peer)
2413 				continue;
2414 
2415 			dp_tx_stats_update(pdev->soc, peer,
2416 					&ppdu_desc->user[i],
2417 					ppdu_desc->ack_rssi);
2418 		}
2419 	}
2420 
2421 	/*
2422 	 * Remove from the list
2423 	 */
2424 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
2425 	nbuf = ppdu_info->nbuf;
2426 	pdev->list_depth--;
2427 	qdf_mem_free(ppdu_info);
2428 
2429 	qdf_assert_always(nbuf);
2430 
2431 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2432 		qdf_nbuf_data(nbuf);
2433 
2434 	/**
2435 	 * Deliver PPDU stats only for valid (acked) data frames if
2436 	 * sniffer mode is not enabled.
2437 	 * If sniffer mode is enabled, PPDU stats for all frames
2438 	 * including mgmt/control frames should be delivered to upper layer
2439 	 */
2440 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2441 		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc,
2442 				nbuf, HTT_INVALID_PEER,
2443 				WDI_NO_VAL, pdev->pdev_id);
2444 	} else {
2445 		if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
2446 				ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
2447 
2448 			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
2449 					pdev->soc, nbuf, HTT_INVALID_PEER,
2450 					WDI_NO_VAL, pdev->pdev_id);
2451 		} else
2452 			qdf_nbuf_free(nbuf);
2453 	}
2454 	return;
2455 }
2456 
2457 /**
2458  * dp_get_ppdu_desc(): Function to allocate new PPDU status
2459  * desc for new ppdu id
2460  * @pdev: DP pdev handle
2461  * @ppdu_id: PPDU unique identifier
2462  * @tlv_type: TLV type received
2463  *
2464  * return: ppdu_info per ppdu tlv structure
2465  */
2466 static
2467 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
2468 			uint8_t tlv_type)
2469 {
2470 	struct ppdu_info *ppdu_info = NULL;
2471 
2472 	/*
2473 	 * Find ppdu_id node exists or not
2474 	 */
2475 	TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) {
2476 
2477 		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
2478 			break;
2479 		}
2480 	}
2481 
2482 	if (ppdu_info) {
2483 		/**
2484 		 * if we get tlv_type that is already been processed for ppdu,
2485 		 * that means we got a new ppdu with same ppdu id.
2486 		 * Hence Flush the older ppdu
2487 		 */
2488 		if (ppdu_info->tlv_bitmap & (1 << tlv_type))
2489 			dp_ppdu_desc_deliver(pdev, ppdu_info);
2490 		else
2491 			return ppdu_info;
2492 	}
2493 
2494 	/**
2495 	 * Flush the head ppdu descriptor if ppdu desc list reaches max
2496 	 * threshold
2497 	 */
2498 	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
2499 		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
2500 		dp_ppdu_desc_deliver(pdev, ppdu_info);
2501 	}
2502 
2503 	/*
2504 	 * Allocate new ppdu_info node
2505 	 */
2506 	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
2507 	if (!ppdu_info)
2508 		return NULL;
2509 
2510 	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
2511 			sizeof(struct cdp_tx_completion_ppdu), 0, 4,
2512 			TRUE);
2513 	if (!ppdu_info->nbuf) {
2514 		qdf_mem_free(ppdu_info);
2515 		return NULL;
2516 	}
2517 
2518 	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf),
2519 			sizeof(struct cdp_tx_completion_ppdu));
2520 
2521 	if (qdf_nbuf_put_tail(ppdu_info->nbuf,
2522 			sizeof(struct cdp_tx_completion_ppdu)) == NULL) {
2523 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2524 				"No tailroom for HTT PPDU");
2525 		qdf_nbuf_free(ppdu_info->nbuf);
2526 		ppdu_info->nbuf = NULL;
2527 		ppdu_info->last_user = 0;
2528 		qdf_mem_free(ppdu_info);
2529 		return NULL;
2530 	}
2531 
2532 	/**
2533 	 * No lock is needed because all PPDU TLVs are processed in
2534 	 * same context and this list is updated in same context
2535 	 */
2536 	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
2537 			ppdu_info_list_elem);
2538 	pdev->list_depth++;
2539 	return ppdu_info;
2540 }
2541 
2542 /**
2543  * dp_htt_process_tlv(): Function to process each PPDU TLVs
2544  * @pdev: DP pdev handle
2545  * @htt_t2h_msg: HTT target to host message
2546  *
2547  * return: ppdu_info per ppdu tlv structure
2548  */
2549 
2550 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
2551 		qdf_nbuf_t htt_t2h_msg)
2552 {
2553 	uint32_t length;
2554 	uint32_t ppdu_id;
2555 	uint8_t tlv_type;
2556 	uint32_t tlv_length, tlv_bitmap_expected;
2557 	uint8_t *tlv_buf;
2558 	struct ppdu_info *ppdu_info = NULL;
2559 
2560 	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2561 
2562 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
2563 
2564 	msg_word = msg_word + 1;
2565 	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
2566 
2567 
2568 	msg_word = msg_word + 3;
2569 	while (length > 0) {
2570 		tlv_buf = (uint8_t *)msg_word;
2571 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
2572 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
2573 		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
2574 			pdev->stats.ppdu_stats_counter[tlv_type]++;
2575 
2576 		if (tlv_length == 0)
2577 			break;
2578 
2579 		tlv_length += HTT_TLV_HDR_LEN;
2580 
2581 		/**
2582 		 * Not allocating separate ppdu descriptor for MGMT Payload
2583 		 * TLV as this is sent as separate WDI indication and it
2584 		 * doesn't contain any ppdu information
2585 		 */
2586 		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
2587 			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
2588 			pdev->mgmtctrl_frm_info.mgmt_buf_len = tlv_length;
2589 			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
2590 			msg_word =
2591 				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2592 			length -= (tlv_length);
2593 			continue;
2594 		}
2595 
2596 		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type);
2597 		if (!ppdu_info)
2598 			return NULL;
2599 		ppdu_info->ppdu_id = ppdu_id;
2600 		ppdu_info->tlv_bitmap |= (1 << tlv_type);
2601 
2602 		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
2603 
2604 		/**
2605 		 * Increment pdev level tlv count to monitor
2606 		 * missing TLVs
2607 		 */
2608 		pdev->tlv_count++;
2609 		ppdu_info->last_tlv_cnt = pdev->tlv_count;
2610 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2611 		length -= (tlv_length);
2612 	}
2613 
2614 	if (!ppdu_info)
2615 		return NULL;
2616 
2617 	pdev->last_ppdu_id = ppdu_id;
2618 
2619 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
2620 
2621 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2622 		if (ppdu_info->is_ampdu)
2623 			tlv_bitmap_expected = HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP;
2624 	}
2625 
2626 	/**
2627 	 * Once all the TLVs for a given PPDU has been processed,
2628 	 * return PPDU status to be delivered to higher layer
2629 	 */
2630 	if (ppdu_info->tlv_bitmap == tlv_bitmap_expected)
2631 		return ppdu_info;
2632 
2633 	return NULL;
2634 }
2635 #endif /* FEATURE_PERPKT_INFO */
2636 
2637 /**
2638  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
2639  * @soc: DP SOC handle
2640  * @pdev_id: pdev id
2641  * @htt_t2h_msg: HTT message nbuf
2642  *
2643  * return:void
2644  */
2645 #if defined(WDI_EVENT_ENABLE)
2646 #ifdef FEATURE_PERPKT_INFO
2647 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
2648 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
2649 {
2650 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
2651 	struct ppdu_info *ppdu_info = NULL;
2652 	bool free_buf = true;
2653 
2654 	if (!pdev)
2655 		return true;
2656 
2657 	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
2658 	    !pdev->mcopy_mode && !pdev->bpr_enable)
2659 		return free_buf;
2660 
2661 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
2662 
2663 	if (pdev->mgmtctrl_frm_info.mgmt_buf) {
2664 		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
2665 		    (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
2666 		    QDF_STATUS_SUCCESS)
2667 			free_buf = false;
2668 
2669 		pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
2670 		pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
2671 		pdev->mgmtctrl_frm_info.ppdu_id = 0;
2672 	}
2673 
2674 	if (ppdu_info)
2675 		dp_ppdu_desc_deliver(pdev, ppdu_info);
2676 
2677 	return free_buf;
2678 }
2679 #else
/* Stub used when FEATURE_PERPKT_INFO is disabled: PPDU stats are not
 * consumed, so tell the caller to free the message buffer.
 */
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	return true;
}
2685 #endif
2686 #endif
2687 
2688 /**
2689  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2690  * @soc: DP SOC handle
2691  * @htt_t2h_msg: HTT message nbuf
2692  *
2693  * return:void
2694  */
2695 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
2696 		qdf_nbuf_t htt_t2h_msg)
2697 {
2698 	uint8_t done;
2699 	qdf_nbuf_t msg_copy;
2700 	uint32_t *msg_word;
2701 
2702 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2703 	msg_word = msg_word + 3;
2704 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2705 
2706 	/*
2707 	 * HTT EXT stats response comes as stream of TLVs which span over
2708 	 * multiple T2H messages.
2709 	 * The first message will carry length of the response.
2710 	 * For rest of the messages length will be zero.
2711 	 *
2712 	 * Clone the T2H message buffer and store it in a list to process
2713 	 * it later.
2714 	 *
2715 	 * The original T2H message buffers gets freed in the T2H HTT event
2716 	 * handler
2717 	 */
2718 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
2719 
2720 	if (!msg_copy) {
2721 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2722 				"T2H messge clone failed for HTT EXT STATS");
2723 		goto error;
2724 	}
2725 
2726 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2727 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
2728 	/*
2729 	 * Done bit signifies that this is the last T2H buffer in the stream of
2730 	 * HTT EXT STATS message
2731 	 */
2732 	if (done) {
2733 		soc->htt_stats.num_stats++;
2734 		qdf_sched_work(0, &soc->htt_stats.work);
2735 	}
2736 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2737 
2738 	return;
2739 
2740 error:
2741 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2742 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
2743 			!= NULL) {
2744 		qdf_nbuf_free(msg_copy);
2745 	}
2746 	soc->htt_stats.num_stats = 0;
2747 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2748 	return;
2749 
2750 }
2751 
2752 /*
2753  * htt_soc_attach_target() - SOC level HTT setup
2754  * @htt_soc:	HTT SOC handle
2755  *
2756  * Return: 0 on success; error code on failure
2757  */
2758 int htt_soc_attach_target(void *htt_soc)
2759 {
2760 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
2761 
2762 	return htt_h2t_ver_req_msg(soc);
2763 }
2764 
2765 
2766 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
2767 /*
2768  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
2769  * @htt_soc:	 HTT SOC handle
2770  * @msg_word:    Pointer to payload
2771  * @htt_t2h_msg: HTT msg nbuf
2772  *
2773  * Return: True if buffer should be freed by caller.
2774  */
2775 static bool
2776 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
2777 				uint32_t *msg_word,
2778 				qdf_nbuf_t htt_t2h_msg)
2779 {
2780 	u_int8_t pdev_id;
2781 	bool free_buf;
2782 	qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
2783 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2784 		"received HTT_T2H_MSG_TYPE_PPDU_STATS_IND");
2785 	pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
2786 	pdev_id = DP_HW2SW_MACID(pdev_id);
2787 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
2788 					      htt_t2h_msg);
2789 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
2790 		htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
2791 		pdev_id);
2792 	return free_buf;
2793 }
2794 #else
/* Stub used when WDI events or pktlog are compiled out: nothing consumes
 * the indication, so tell the caller to free the buffer.
 */
static bool
dp_ppdu_stats_ind_handler(struct htt_soc *soc,
				uint32_t *msg_word,
				qdf_nbuf_t htt_t2h_msg)
{
	return true;
}
2802 #endif
2803 
2804 #if defined(WDI_EVENT_ENABLE) && \
2805 		!defined(REMOVE_PKT_LOG) && defined(CONFIG_WIN)
2806 /*
2807  * dp_pktlog_msg_handler() - Pktlog msg handler
2808  * @htt_soc:	 HTT SOC handle
2809  * @msg_word:    Pointer to payload
2810  *
2811  * Return: None
2812  */
2813 static void
2814 dp_pktlog_msg_handler(struct htt_soc *soc,
2815 				uint32_t *msg_word)
2816 {
2817 	uint8_t pdev_id;
2818 	uint32_t *pl_hdr;
2819 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2820 		"received HTT_T2H_MSG_TYPE_PKTLOG");
2821 	pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
2822 	pdev_id = DP_HW2SW_MACID(pdev_id);
2823 	pl_hdr = (msg_word + 1);
2824 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
2825 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
2826 		pdev_id);
2827 }
2828 #else
/* Stub used when pktlog support is compiled out: the message is ignored. */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
				uint32_t *msg_word)
{
}
2834 #endif
2835 
2836 /*
2837  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
2838  * @context:	Opaque context (HTT SOC handle)
2839  * @pkt:	HTC packet
2840  */
2841 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
2842 {
2843 	struct htt_soc *soc = (struct htt_soc *) context;
2844 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
2845 	u_int32_t *msg_word;
2846 	enum htt_t2h_msg_type msg_type;
2847 	bool free_buf = true;
2848 
2849 	/* check for successful message reception */
2850 	if (pkt->Status != QDF_STATUS_SUCCESS) {
2851 		if (pkt->Status != QDF_STATUS_E_CANCELED)
2852 			soc->stats.htc_err_cnt++;
2853 
2854 		qdf_nbuf_free(htt_t2h_msg);
2855 		return;
2856 	}
2857 
2858 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
2859 
2860 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
2861 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
2862 	switch (msg_type) {
2863 	case HTT_T2H_MSG_TYPE_PEER_MAP:
2864 		{
2865 			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
2866 			u_int8_t *peer_mac_addr;
2867 			u_int16_t peer_id;
2868 			u_int16_t hw_peer_id;
2869 			u_int8_t vdev_id;
2870 
2871 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
2872 			hw_peer_id =
2873 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
2874 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
2875 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
2876 				(u_int8_t *) (msg_word+1),
2877 				&mac_addr_deswizzle_buf[0]);
2878 			QDF_TRACE(QDF_MODULE_ID_TXRX,
2879 				QDF_TRACE_LEVEL_INFO,
2880 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
2881 				peer_id, vdev_id);
2882 
2883 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
2884 						vdev_id, peer_mac_addr);
2885 			break;
2886 		}
2887 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
2888 		{
2889 			u_int16_t peer_id;
2890 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
2891 
2892 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id);
2893 			break;
2894 		}
2895 	case HTT_T2H_MSG_TYPE_SEC_IND:
2896 		{
2897 			u_int16_t peer_id;
2898 			enum htt_sec_type sec_type;
2899 			int is_unicast;
2900 
2901 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
2902 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
2903 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
2904 			/* point to the first part of the Michael key */
2905 			msg_word++;
2906 			dp_rx_sec_ind_handler(
2907 				soc->dp_soc, peer_id, sec_type, is_unicast,
2908 				msg_word, msg_word + 2);
2909 			break;
2910 		}
2911 
2912 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
2913 		{
2914 			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
2915 							     htt_t2h_msg);
2916 			break;
2917 		}
2918 
2919 	case HTT_T2H_MSG_TYPE_PKTLOG:
2920 		{
2921 			dp_pktlog_msg_handler(soc, msg_word);
2922 			break;
2923 		}
2924 
2925 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
2926 		{
2927 			htc_pm_runtime_put(soc->htc_soc);
2928 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
2929 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
2930 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2931 				"target uses HTT version %d.%d; host uses %d.%d",
2932 				soc->tgt_ver.major, soc->tgt_ver.minor,
2933 				HTT_CURRENT_VERSION_MAJOR,
2934 				HTT_CURRENT_VERSION_MINOR);
2935 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
2936 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2937 					QDF_TRACE_LEVEL_ERROR,
2938 					"*** Incompatible host/target HTT versions!");
2939 			}
2940 			/* abort if the target is incompatible with the host */
2941 			qdf_assert(soc->tgt_ver.major ==
2942 				HTT_CURRENT_VERSION_MAJOR);
2943 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
2944 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2945 					QDF_TRACE_LEVEL_WARN,
2946 					"*** Warning: host/target HTT versions"
2947 					" are different, though compatible!");
2948 			}
2949 			break;
2950 		}
2951 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
2952 		{
2953 			uint16_t peer_id;
2954 			uint8_t tid;
2955 			uint8_t win_sz;
2956 			uint16_t status;
2957 			struct dp_peer *peer;
2958 
2959 			/*
2960 			 * Update REO Queue Desc with new values
2961 			 */
2962 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
2963 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
2964 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
2965 			peer = dp_peer_find_by_id(soc->dp_soc, peer_id);
2966 
2967 			/*
2968 			 * Window size needs to be incremented by 1
2969 			 * since fw needs to represent a value of 256
2970 			 * using just 8 bits
2971 			 */
2972 			if (peer) {
2973 				status = dp_addba_requestprocess_wifi3(peer,
2974 						0, tid, 0, win_sz + 1, 0xffff);
2975 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2976 					QDF_TRACE_LEVEL_INFO,
2977 					FL("PeerID %d BAW %d TID %d stat %d"),
2978 					peer_id, win_sz, tid, status);
2979 
2980 			} else {
2981 				QDF_TRACE(QDF_MODULE_ID_TXRX,
2982 					QDF_TRACE_LEVEL_ERROR,
2983 					FL("Peer not found peer id %d"),
2984 					peer_id);
2985 			}
2986 			break;
2987 		}
2988 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
2989 		{
2990 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
2991 			break;
2992 		}
2993 	default:
2994 		break;
2995 	};
2996 
2997 	/* Free the indication buffer */
2998 	if (free_buf)
2999 		qdf_nbuf_free(htt_t2h_msg);
3000 }
3001 
3002 /*
3003  * dp_htt_h2t_full() - Send full handler (called from HTC)
3004  * @context:	Opaque context (HTT SOC handle)
3005  * @pkt:	HTC packet
3006  *
3007  * Return: enum htc_send_full_action
3008  */
static enum htc_send_full_action
dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
{
	/* send queue exceeded MaxSendQueueDepth: keep queuing, never drop */
	return HTC_SEND_FULL_KEEP;
}
3014 
3015 /*
3016  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
3017  * @context:	Opaque context (HTT SOC handle)
3018  * @nbuf:	nbuf containing T2H message
3019  * @pipe_id:	HIF pipe ID
3020  *
3021  * Return: QDF_STATUS
3022  *
3023  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3024  * will be used for packet log and other high-priority HTT messages. Proper
3025  * HTC connection to be added later once required FW changes are available
3026  */
3027 static QDF_STATUS
3028 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3029 {
3030 	A_STATUS rc = QDF_STATUS_SUCCESS;
3031 	HTC_PACKET htc_pkt;
3032 
3033 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3034 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3035 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3036 	htc_pkt.pPktContext = (void *)nbuf;
3037 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3038 
3039 	return rc;
3040 }
3041 
3042 /*
3043  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
3044  * @htt_soc:	HTT SOC handle
3045  *
3046  * Return: 0 on success; error code on failure
3047  */
3048 static int
3049 htt_htc_soc_attach(struct htt_soc *soc)
3050 {
3051 	struct htc_service_connect_req connect;
3052 	struct htc_service_connect_resp response;
3053 	A_STATUS status;
3054 	struct dp_soc *dpsoc = soc->dp_soc;
3055 
3056 	qdf_mem_set(&connect, sizeof(connect), 0);
3057 	qdf_mem_set(&response, sizeof(response), 0);
3058 
3059 	connect.pMetaData = NULL;
3060 	connect.MetaDataLength = 0;
3061 	connect.EpCallbacks.pContext = soc;
3062 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
3063 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3064 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
3065 
3066 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
3067 	connect.EpCallbacks.EpRecvRefill = NULL;
3068 
3069 	/* N/A, fill is done by HIF */
3070 	connect.EpCallbacks.RecvRefillWaterMark = 1;
3071 
3072 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
3073 	/*
3074 	 * Specify how deep to let a queue get before htc_send_pkt will
3075 	 * call the EpSendFull function due to excessive send queue depth.
3076 	 */
3077 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
3078 
3079 	/* disable flow control for HTT data message service */
3080 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
3081 
3082 	/* connect to control service */
3083 	connect.service_id = HTT_DATA_MSG_SVC;
3084 
3085 	status = htc_connect_service(soc->htc_soc, &connect, &response);
3086 
3087 	if (status != A_OK)
3088 		return QDF_STATUS_E_FAILURE;
3089 
3090 	soc->htc_endpoint = response.Endpoint;
3091 
3092 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
3093 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
3094 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
3095 
3096 	return 0; /* success */
3097 }
3098 
3099 /*
3100  * htt_soc_attach() - SOC level HTT initialization
3101  * @dp_soc:	Opaque Data path SOC handle
3102  * @ctrl_psoc:	Opaque ctrl SOC handle
3103  * @htc_soc:	SOC level HTC handle
3104  * @hal_soc:	Opaque HAL SOC handle
3105  * @osdev:	QDF device
3106  *
3107  * Return: HTT handle on success; NULL on failure
3108  */
3109 void *
3110 htt_soc_attach(void *dp_soc, void *ctrl_psoc, HTC_HANDLE htc_soc,
3111 	void *hal_soc, qdf_device_t osdev)
3112 {
3113 	struct htt_soc *soc;
3114 	int i;
3115 
3116 	soc = qdf_mem_malloc(sizeof(*soc));
3117 
3118 	if (!soc)
3119 		goto fail1;
3120 
3121 	soc->osdev = osdev;
3122 	soc->ctrl_psoc = ctrl_psoc;
3123 	soc->dp_soc = dp_soc;
3124 	soc->htc_soc = htc_soc;
3125 	soc->hal_soc = hal_soc;
3126 
3127 	/* TODO: See if any NSS related context is required in htt_soc */
3128 
3129 	soc->htt_htc_pkt_freelist = NULL;
3130 
3131 	if (htt_htc_soc_attach(soc))
3132 		goto fail2;
3133 
3134 	/* TODO: See if any Rx data specific intialization is required. For
3135 	 * MCL use cases, the data will be received as single packet and
3136 	 * should not required any descriptor or reorder handling
3137 	 */
3138 
3139 	HTT_TX_MUTEX_INIT(&soc->htt_tx_mutex);
3140 
3141 	/* pre-allocate some HTC_PACKET objects */
3142 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3143 		struct dp_htt_htc_pkt_union *pkt;
3144 		pkt = qdf_mem_malloc(sizeof(*pkt));
3145 		if (!pkt)
3146 			break;
3147 
3148 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3149 	}
3150 
3151 	return soc;
3152 
3153 fail2:
3154 	qdf_mem_free(soc);
3155 
3156 fail1:
3157 	return NULL;
3158 }
3159 
3160 
3161 /*
3162  * htt_soc_detach() - Detach SOC level HTT
3163  * @htt_soc:	HTT SOC handle
3164  */
void
htt_soc_detach(void *htt_soc)
{
	struct htt_soc *soc = (struct htt_soc *)htt_soc;

	/* drain both HTC packet pools before destroying the tx mutex,
	 * then release the soc object itself
	 */
	htt_htc_misc_pkt_pool_free(soc);
	htt_htc_pkt_pool_free(soc);
	HTT_TX_MUTEX_DESTROY(&soc->htt_tx_mutex);
	qdf_mem_free(soc);
}
3175 
3176 /**
3177  * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW
3178  * @pdev: DP PDEV handle
3179  * @stats_type_upload_mask: stats type requested by user
3180  * @config_param_0: extra configuration parameters
3181  * @config_param_1: extra configuration parameters
3182  * @config_param_2: extra configuration parameters
3183  * @config_param_3: extra configuration parameters
3184  * @mac_id: mac number
3185  *
3186  * return: QDF STATUS
3187  */
QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint32_t config_param_0,
		uint32_t config_param_1, uint32_t config_param_2,
		uint32_t config_param_3, int cookie_val, int cookie_msb,
		uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask = 0;

	/* reserve headroom for the HTC header and alignment padding */
	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);

	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	mac_id = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);

	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"Failed to expand head for HTT_EXT_STATS");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* NOTE(review): the "config_param_4" label below actually prints
	 * config_param_3 — the format string label is misleading.
	 */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
		"config_param_1 %u\n config_param_2 %u\n"
		"config_param_4 %u\n -------------",
		__func__, __LINE__, cookie_val, config_param_0,
		config_param_1, config_param_2,	config_param_3);

	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	/* word 0: msg type, pdev mask, requested stats bitmap */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);

	/* word 1 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);

	/* word 2 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);

	/* word 3 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);

	/* word 4 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);

	/* NOTE(review): this second SET targets word 4 again (msg_word has
	 * not been advanced); it may have been intended for word 5 below —
	 * confirm intent against the HTT message definition.
	 */
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);

	/* word 5 */
	msg_word++;
	/* NOTE(review): word 5 is skipped without being written; its
	 * contents are whatever the nbuf allocation left behind — TODO
	 * confirm the FW ignores this word.
	 */

	/* word 6 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);

	/* word 7 */
	msg_word++;
	*msg_word = 0;
	/*Using last 2 bits for pdev_id */
	cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			soc->htc_endpoint,
			1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	/* on successful send the pkt is tracked on the misc list; the nbuf
	 * is freed by the send-complete callback
	 */
	DP_HTT_SEND_HTC_PKT(soc, pkt);
	return 0;
}
3300 
3301 /* This macro will be reverted once a proper definition for
3302  * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG is available in the HTT header (htt.h).
3303  */
3304 #if defined(WDI_EVENT_ENABLE)
3305 /**
3306  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
3307  * @pdev: DP PDEV handle
3308  * @stats_type_upload_mask: stats type requested by user
3309  * @mac_id: Mac id number
3310  *
3311  * return: QDF STATUS
3312  */
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask;

	/* reserve headroom for the HTC header and alignment padding */
	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer");
		qdf_assert(0);
		return QDF_STATUS_E_NOMEM;
	}

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	/* NOTE(review): unlike dp_h2t_ext_stats_msg_send, mac_id is used
	 * here without a dp_get_mac_id_for_pdev() translation — confirm
	 * callers already pass a per-pdev mac id.
	 */
	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Failed to expand head for HTT_CFG_STATS");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	/* word 0: msg type, pdev mask, TLV bitmap requested by caller */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
			stats_type_upload_mask);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			soc->htc_endpoint,
			1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	/* nbuf ownership passes to HTC; freed in the send-complete callback */
	DP_HTT_SEND_HTC_PKT(soc, pkt);
	return 0;
}
3385 #endif
3386