xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision dd4dc88b837a295134aa9869114a2efee0f4894b)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <htt.h>
20 #include <hal_hw_headers.h>
21 #include <hal_api.h>
22 #include "dp_peer.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_rx_mon.h"
26 #include "htt_stats.h"
27 #include "htt_ppdu_stats.h"
28 #include "dp_htt.h"
29 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
30 #include "cdp_txrx_cmn_struct.h"
31 
32 #ifdef FEATURE_PERPKT_INFO
33 #include "dp_ratetable.h"
34 #endif
35 
/* size of one T2H extended-stats TLV header */
#define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE

/* initial number of HTC packet containers in the free-list pool */
#define HTT_HTC_PKT_POOL_INIT_SIZE 64
/* maximum size (bytes) of a single target-to-host HTT message */
#define HTT_T2H_MAX_MSG_SIZE 2048

/* total nbuf size needed to carry an HTT message of msg_bytes payload,
 * including room for the HTC header and its alignment padding
 */
#define HTT_MSG_BUF_SIZE(msg_bytes) \
	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)

/* low-2-bit mask (user not visible in this chunk - confirm at call site) */
#define HTT_PID_BIT_MASK 0x3

/* length of one segment of a multi-fragment extended message */
#define DP_EXT_MSG_LENGTH 2048
/* submit pkt on the HTT endpoint; on successful submission, track it on
 * the misc list so its buffers can be reclaimed later
 */
#define DP_HTT_SEND_HTC_PKT(soc, pkt)                            \
do {                                                             \
	if (htc_send_pkt(soc->htc_soc, &pkt->htc_pkt) ==         \
					QDF_STATUS_SUCCESS)      \
		htt_htc_misc_pkt_list_add(soc, pkt);             \
} while (0)

/* reserved length in the mgmt-ctrl TLV header (name keeps the original
 * "RESERVERD" spelling, which external users reference)
 */
#define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
55 
56 /*
57  * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv
58  * bitmap for sniffer mode
59  * @bitmap: received bitmap
60  *
61  * Return: expected bitmap value, returns zero if doesn't match with
62  * either 64-bit Tx window or 256-bit window tlv bitmap
63  */
64 int
65 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
66 {
67 	if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
68 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
69 	else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
70 		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;
71 
72 	return 0;
73 }
74 
75 #ifdef FEATURE_PERPKT_INFO
76 /*
77  * dp_tx_rate_stats_update() - Update rate per-peer statistics
78  * @peer: Datapath peer handle
79  * @ppdu: PPDU Descriptor
80  *
81  * Return: None
82  */
83 static void
84 dp_tx_rate_stats_update(struct dp_peer *peer,
85 			struct cdp_tx_completion_ppdu_user *ppdu)
86 {
87 	uint32_t ratekbps = 0;
88 	uint64_t ppdu_tx_rate = 0;
89 	uint32_t rix;
90 
91 	if (!peer || !ppdu)
92 		return;
93 
94 	ratekbps = dp_getrateindex(ppdu->gi,
95 				   ppdu->mcs,
96 				   ppdu->nss,
97 				   ppdu->preamble,
98 				   ppdu->bw,
99 				   &rix);
100 
101 	DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);
102 
103 	if (!ratekbps)
104 		return;
105 
106 	ppdu->rix = rix;
107 	ppdu->tx_ratekbps = ratekbps;
108 	peer->stats.tx.avg_tx_rate =
109 		dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
110 	ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
111 	DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
112 
113 	if (peer->vdev) {
114 		if (peer->bss_peer) {
115 			peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
116 			peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
117 		} else {
118 			peer->vdev->stats.tx.last_tx_rate = ratekbps;
119 			peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
120 		}
121 	}
122 }
123 
/*
 * dp_tx_stats_update() - Update per-peer statistics
 * @soc: Datapath soc handle
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 * @ack_rssi: RSSI of last ack received
 *
 * Accumulates a completed PPDU's per-user results into the peer's tx
 * stats: retry/failure counters, MU group position, OFDMA RU usage,
 * per-preamble MCS histograms, bandwidth/NSS/AC counters, and finally
 * publishes the updated stats via dp_peer_stats_notify() (and WDI when
 * enabled).
 *
 * Return: None
 */
static void
dp_tx_stats_update(struct dp_soc *soc, struct dp_peer *peer,
		   struct cdp_tx_completion_ppdu_user *ppdu,
		   uint32_t ack_rssi)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	uint8_t preamble, mcs;
	uint16_t num_msdu;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;
	num_msdu = ppdu->num_msdu;

	/* If the peer statistics are already processed as part of
	 * per-MSDU completion handler, do not process these again in per-PPDU
	 * indications */
	if (soc->process_tx_status)
		return;

	/* unsuccessful PPDUs contribute only retry/failure counters */
	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) {
		DP_STATS_INC(peer, tx.retries,
			     (ppdu->long_retries + ppdu->short_retries));
		DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
		return;
	}

	/* MU PPDU: record this user's position (1-based) for the group */
	if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
	    ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
		if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "mu_group_id out of bound!!\n");
		else
			DP_STATS_UPD(peer, tx.mu_group_id[ppdu->mu_group_id],
				     (ppdu->user_pos + 1));
	}

	/* OFDMA PPDU: bucket the resource-unit allocation by tone count */
	if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
	    ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
		DP_STATS_UPD(peer, tx.ru_tones, ppdu->ru_tones);
		DP_STATS_UPD(peer, tx.ru_start, ppdu->ru_start);
		switch (ppdu->ru_tones) {
		case RU_26:
			DP_STATS_INC(peer, tx.ru_loc[0], num_msdu);
		break;
		case RU_52:
			DP_STATS_INC(peer, tx.ru_loc[1], num_msdu);
		break;
		case RU_106:
			DP_STATS_INC(peer, tx.ru_loc[2], num_msdu);
		break;
		case RU_242:
			DP_STATS_INC(peer, tx.ru_loc[3], num_msdu);
		break;
		case RU_484:
			DP_STATS_INC(peer, tx.ru_loc[4], num_msdu);
		break;
		case RU_996:
			DP_STATS_INC(peer, tx.ru_loc[5], num_msdu);
		break;
		}
	}

	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type], num_msdu);
	DP_STATS_INC_PKT(peer, tx.comp_pkt,
			num_msdu, (ppdu->success_bytes +
				ppdu->retry_bytes + ppdu->failed_bytes));
	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], num_msdu);
	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
	/* ack RSSI is only meaningful for unicast with a valid sample */
	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);

	/* Per-preamble MCS histogram: for each preamble type, an MCS at or
	 * above that preamble's maximum is folded into the overflow bucket
	 * (MAX_MCS - 1); otherwise the exact MCS bucket is incremented.
	 * Exactly one INCC of each pair fires per preamble.
	 */
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer, tx.ampdu_cnt, num_msdu, ppdu->is_ampdu);
	DP_STATS_INCC(peer, tx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu));

	dp_peer_stats_notify(peer);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer->stats, ppdu->peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
#endif
}
#endif
251 
#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_tx_capture.h"
#else
/*
 * dp_process_ppdu_stats_update_failed_bitmap() - stub
 * @pdev: Datapath pdev handle (unused)
 * @data: opaque payload (unused)
 * @ppdu_id: PPDU id (unused)
 * @size: payload size (unused)
 *
 * No-op placeholder compiled in when WLAN_TX_PKT_CAPTURE_ENH is
 * disabled; the real implementation is provided by dp_tx_capture.h.
 */
static inline void
dp_process_ppdu_stats_update_failed_bitmap(struct dp_pdev *pdev,
					   void *data,
					   uint32_t ppdu_id,
					   uint32_t size)
{
}
#endif
263 
264 /*
265  * htt_htc_pkt_alloc() - Allocate HTC packet buffer
266  * @htt_soc:	HTT SOC handle
267  *
268  * Return: Pointer to htc packet buffer
269  */
270 static struct dp_htt_htc_pkt *
271 htt_htc_pkt_alloc(struct htt_soc *soc)
272 {
273 	struct dp_htt_htc_pkt_union *pkt = NULL;
274 
275 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
276 	if (soc->htt_htc_pkt_freelist) {
277 		pkt = soc->htt_htc_pkt_freelist;
278 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
279 	}
280 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
281 
282 	if (!pkt)
283 		pkt = qdf_mem_malloc(sizeof(*pkt));
284 	return &pkt->u.pkt; /* not actually a dereference */
285 }
286 
287 /*
288  * htt_htc_pkt_free() - Free HTC packet buffer
289  * @htt_soc:	HTT SOC handle
290  */
291 static void
292 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
293 {
294 	struct dp_htt_htc_pkt_union *u_pkt =
295 		(struct dp_htt_htc_pkt_union *)pkt;
296 
297 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
298 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
299 	soc->htt_htc_pkt_freelist = u_pkt;
300 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
301 }
302 
303 /*
304  * htt_htc_pkt_pool_free() - Free HTC packet pool
305  * @htt_soc:	HTT SOC handle
306  */
307 static void
308 htt_htc_pkt_pool_free(struct htt_soc *soc)
309 {
310 	struct dp_htt_htc_pkt_union *pkt, *next;
311 	pkt = soc->htt_htc_pkt_freelist;
312 	while (pkt) {
313 		next = pkt->u.next;
314 		qdf_mem_free(pkt);
315 		pkt = next;
316 	}
317 	soc->htt_htc_pkt_freelist = NULL;
318 }
319 
/*
 * htt_htc_misc_pkt_list_trim() - trim misc list
 * @soc: HTT SOC handle
 * @level: max no. of pkts in list
 *
 * Walks the misclist under the tx mutex and frees every node beyond the
 * first @level entries, unmapping and freeing each node's attached
 * network buffer as well. The last retained node's next pointer is
 * cleared so the kept portion of the list stays terminated.
 */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list*/
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			pkt = NULL;
			/* terminate the retained list at the last kept node;
			 * only takes effect for the first trimmed node since
			 * prev becomes NULL below once trimming starts */
			if (prev)
				prev->u.next = NULL;
		}
		/* prev tracks the last kept node (NULL once trimming began) */
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
352 
353 /*
354  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
355  * @htt_soc:	HTT SOC handle
356  * @dp_htt_htc_pkt: pkt to be added to list
357  */
358 static void
359 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
360 {
361 	struct dp_htt_htc_pkt_union *u_pkt =
362 				(struct dp_htt_htc_pkt_union *)pkt;
363 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
364 							pkt->htc_pkt.Endpoint)
365 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
366 
367 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
368 	if (soc->htt_htc_pkt_misclist) {
369 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
370 		soc->htt_htc_pkt_misclist = u_pkt;
371 	} else {
372 		soc->htt_htc_pkt_misclist = u_pkt;
373 	}
374 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
375 
376 	/* only ce pipe size + tx_queue_depth could possibly be in use
377 	 * free older packets in the misclist
378 	 */
379 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
380 }
381 
382 /*
383  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
384  * @htt_soc:	HTT SOC handle
385  */
386 static void
387 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
388 {
389 	struct dp_htt_htc_pkt_union *pkt, *next;
390 	qdf_nbuf_t netbuf;
391 
392 	pkt = soc->htt_htc_pkt_misclist;
393 
394 	while (pkt) {
395 		next = pkt->u.next;
396 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
397 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
398 
399 		soc->stats.htc_pkt_free++;
400 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
401 			 "%s: Pkt free count %d",
402 			 __func__, soc->stats.htc_pkt_free);
403 
404 		qdf_nbuf_free(netbuf);
405 		qdf_mem_free(pkt);
406 		pkt = next;
407 	}
408 	soc->htt_htc_pkt_misclist = NULL;
409 }
410 
/*
 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
 * @tgt_mac_addr:	MAC address bytes as uploaded by the target
 * @buffer:		Caller-provided output area (at least 6 bytes)
 *
 * Return: pointer to the MAC address in host byte order. On a host
 * whose endianness matches the target this is @tgt_mac_addr itself;
 * on a big-endian host the de-swizzled copy is written into @buffer
 * and @buffer is returned.
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The target->host upload byte-swaps each u_int32_t element to
	 * compensate for the endianness difference, which scrambles byte
	 * arrays such as the MAC address. Undo that per-word swap here:
	 * the first four MAC bytes sit reversed in the first word, the
	 * remaining two at the top of the second word.
	 */
	int idx;

	for (idx = 0; idx < 4; idx++)
		buffer[idx] = tgt_mac_addr[3 - idx];
	buffer[4] = tgt_mac_addr[7];
	buffer[5] = tgt_mac_addr[6];
	return buffer;
#else
	/* Host and target endianness match - use the bytes in place. */
	return tgt_mac_addr;
#endif
}
444 
/*
 * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
 * @soc:	SOC handle (unused)
 * @status:	Completion status (unused; buffer is freed regardless)
 * @netbuf:	HTT buffer
 *
 * Default "part 2" send-complete callback installed on outgoing H2T
 * messages: simply releases the network buffer that carried the message.
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}
457 
/*
 * dp_htt_h2t_send_complete() - H2T completion handler
 * @context:	Opaque context (HTT SOC handle)
 * @htc_pkt:	HTC packet
 *
 * Invoked by HTC when an H2T message send completes. Recovers the
 * enclosing dp_htt_htc_pkt via container_of, runs the optional
 * "part 2" callback stored in pPktContext (which owns the netbuf and
 * typically frees it), then recycles the container onto the free list.
 */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	void (*send_complete_part2)(
		void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
	struct htt_soc *soc =  (struct htt_soc *) context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	send_complete_part2 = htc_pkt->pPktContext;

	/* htc_pkt is embedded in dp_htt_htc_pkt; recover the container */
	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	 */
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	if (send_complete_part2) {
		send_complete_part2(
			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
	}
	/* free the htt_htc_pkt / HTC_PACKET object */
	htt_htc_pkt_free(soc, htt_pkt);
}
489 
490 /*
491  * htt_h2t_ver_req_msg() - Send HTT version request message to target
492  * @htt_soc:	HTT SOC handle
493  *
494  * Return: 0 on success; error code on failure
495  */
496 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
497 {
498 	struct dp_htt_htc_pkt *pkt;
499 	qdf_nbuf_t msg;
500 	uint32_t *msg_word;
501 
502 	msg = qdf_nbuf_alloc(
503 		soc->osdev,
504 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
505 		/* reserve room for the HTC header */
506 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
507 	if (!msg)
508 		return QDF_STATUS_E_NOMEM;
509 
510 	/*
511 	 * Set the length of the message.
512 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
513 	 * separately during the below call to qdf_nbuf_push_head.
514 	 * The contribution from the HTC header is added separately inside HTC.
515 	 */
516 	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
517 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
518 			"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
519 			__func__);
520 		return QDF_STATUS_E_FAILURE;
521 	}
522 
523 	/* fill in the message contents */
524 	msg_word = (u_int32_t *) qdf_nbuf_data(msg);
525 
526 	/* rewind beyond alignment pad to get to the HTC header reserved area */
527 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
528 
529 	*msg_word = 0;
530 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
531 
532 	pkt = htt_htc_pkt_alloc(soc);
533 	if (!pkt) {
534 		qdf_nbuf_free(msg);
535 		return QDF_STATUS_E_FAILURE;
536 	}
537 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
538 
539 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
540 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
541 		qdf_nbuf_len(msg), soc->htc_endpoint,
542 		1); /* tag - not relevant here */
543 
544 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
545 	DP_HTT_SEND_HTC_PKT(soc, pkt);
546 	return 0;
547 }
548 
549 /*
550  * htt_srng_setup() - Send SRNG setup message to target
551  * @htt_soc:	HTT SOC handle
552  * @mac_id:	MAC Id
553  * @hal_srng:	Opaque HAL SRNG pointer
554  * @hal_ring_type:	SRNG ring type
555  *
556  * Return: 0 on success; error code on failure
557  */
558 int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,
559 	int hal_ring_type)
560 {
561 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
562 	struct dp_htt_htc_pkt *pkt;
563 	qdf_nbuf_t htt_msg;
564 	uint32_t *msg_word;
565 	struct hal_srng_params srng_params;
566 	qdf_dma_addr_t hp_addr, tp_addr;
567 	uint32_t ring_entry_size =
568 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
569 	int htt_ring_type, htt_ring_id;
570 
571 	/* Sizes should be set in 4-byte words */
572 	ring_entry_size = ring_entry_size >> 2;
573 
574 	htt_msg = qdf_nbuf_alloc(soc->osdev,
575 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
576 		/* reserve room for the HTC header */
577 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
578 	if (!htt_msg)
579 		goto fail0;
580 
581 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
582 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_srng);
583 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_srng);
584 
585 	switch (hal_ring_type) {
586 	case RXDMA_BUF:
587 #ifdef QCA_HOST2FW_RXBUF_RING
588 		if (srng_params.ring_id ==
589 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
590 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
591 			htt_ring_type = HTT_SW_TO_SW_RING;
592 #ifdef IPA_OFFLOAD
593 		} else if (srng_params.ring_id ==
594 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
595 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
596 			htt_ring_type = HTT_SW_TO_SW_RING;
597 #endif
598 #else
599 		if (srng_params.ring_id ==
600 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
601 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
602 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
603 			htt_ring_type = HTT_SW_TO_HW_RING;
604 #endif
605 		} else if (srng_params.ring_id ==
606 #ifdef IPA_OFFLOAD
607 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
608 #else
609 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
610 #endif
611 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
612 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
613 			htt_ring_type = HTT_SW_TO_HW_RING;
614 		} else {
615 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
616 				   "%s: Ring %d currently not supported",
617 				   __func__, srng_params.ring_id);
618 			goto fail1;
619 		}
620 
621 		dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
622 			hal_ring_type, srng_params.ring_id, htt_ring_id,
623 			(uint64_t)hp_addr,
624 			(uint64_t)tp_addr);
625 		break;
626 	case RXDMA_MONITOR_BUF:
627 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
628 		htt_ring_type = HTT_SW_TO_HW_RING;
629 		break;
630 	case RXDMA_MONITOR_STATUS:
631 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
632 		htt_ring_type = HTT_SW_TO_HW_RING;
633 		break;
634 	case RXDMA_MONITOR_DST:
635 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
636 		htt_ring_type = HTT_HW_TO_SW_RING;
637 		break;
638 	case RXDMA_MONITOR_DESC:
639 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
640 		htt_ring_type = HTT_SW_TO_HW_RING;
641 		break;
642 	case RXDMA_DST:
643 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
644 		htt_ring_type = HTT_HW_TO_SW_RING;
645 		break;
646 
647 	default:
648 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
649 			"%s: Ring currently not supported", __func__);
650 			goto fail1;
651 	}
652 
653 	/*
654 	 * Set the length of the message.
655 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
656 	 * separately during the below call to qdf_nbuf_push_head.
657 	 * The contribution from the HTC header is added separately inside HTC.
658 	 */
659 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
660 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
661 			"%s: Failed to expand head for SRING_SETUP msg",
662 			__func__);
663 		return QDF_STATUS_E_FAILURE;
664 	}
665 
666 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
667 
668 	/* rewind beyond alignment pad to get to the HTC header reserved area */
669 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
670 
671 	/* word 0 */
672 	*msg_word = 0;
673 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
674 
675 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
676 			(htt_ring_type == HTT_HW_TO_SW_RING))
677 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word,
678 			 DP_SW2HW_MACID(mac_id));
679 	else
680 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
681 
682 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
683 		  "%s: mac_id %d", __func__, mac_id);
684 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
685 	/* TODO: Discuss with FW on changing this to unique ID and using
686 	 * htt_ring_type to send the type of ring
687 	 */
688 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
689 
690 	/* word 1 */
691 	msg_word++;
692 	*msg_word = 0;
693 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
694 		srng_params.ring_base_paddr & 0xffffffff);
695 
696 	/* word 2 */
697 	msg_word++;
698 	*msg_word = 0;
699 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
700 		(uint64_t)srng_params.ring_base_paddr >> 32);
701 
702 	/* word 3 */
703 	msg_word++;
704 	*msg_word = 0;
705 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
706 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
707 		(ring_entry_size * srng_params.num_entries));
708 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
709 		  "%s: entry_size %d", __func__,
710 			 ring_entry_size);
711 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
712 		  "%s: num_entries %d", __func__,
713 			 srng_params.num_entries);
714 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
715 		  "%s: ring_size %d", __func__,
716 			 (ring_entry_size * srng_params.num_entries));
717 	if (htt_ring_type == HTT_SW_TO_HW_RING)
718 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
719 						*msg_word, 1);
720 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
721 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
722 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
723 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
724 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
725 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
726 
727 	/* word 4 */
728 	msg_word++;
729 	*msg_word = 0;
730 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
731 		hp_addr & 0xffffffff);
732 
733 	/* word 5 */
734 	msg_word++;
735 	*msg_word = 0;
736 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
737 		(uint64_t)hp_addr >> 32);
738 
739 	/* word 6 */
740 	msg_word++;
741 	*msg_word = 0;
742 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
743 		tp_addr & 0xffffffff);
744 
745 	/* word 7 */
746 	msg_word++;
747 	*msg_word = 0;
748 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
749 		(uint64_t)tp_addr >> 32);
750 
751 	/* word 8 */
752 	msg_word++;
753 	*msg_word = 0;
754 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
755 		srng_params.msi_addr & 0xffffffff);
756 
757 	/* word 9 */
758 	msg_word++;
759 	*msg_word = 0;
760 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
761 		(uint64_t)(srng_params.msi_addr) >> 32);
762 
763 	/* word 10 */
764 	msg_word++;
765 	*msg_word = 0;
766 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
767 		srng_params.msi_data);
768 
769 	/* word 11 */
770 	msg_word++;
771 	*msg_word = 0;
772 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
773 		srng_params.intr_batch_cntr_thres_entries *
774 		ring_entry_size);
775 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
776 		srng_params.intr_timer_thres_us >> 3);
777 
778 	/* word 12 */
779 	msg_word++;
780 	*msg_word = 0;
781 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
782 		/* TODO: Setting low threshold to 1/8th of ring size - see
783 		 * if this needs to be configurable
784 		 */
785 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
786 			srng_params.low_threshold);
787 	}
788 	/* "response_required" field should be set if a HTT response message is
789 	 * required after setting up the ring.
790 	 */
791 	pkt = htt_htc_pkt_alloc(soc);
792 	if (!pkt)
793 		goto fail1;
794 
795 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
796 
797 	SET_HTC_PACKET_INFO_TX(
798 		&pkt->htc_pkt,
799 		dp_htt_h2t_send_complete_free_netbuf,
800 		qdf_nbuf_data(htt_msg),
801 		qdf_nbuf_len(htt_msg),
802 		soc->htc_endpoint,
803 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
804 
805 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
806 	DP_HTT_SEND_HTC_PKT(soc, pkt);
807 
808 	return QDF_STATUS_SUCCESS;
809 
810 fail1:
811 	qdf_nbuf_free(htt_msg);
812 fail0:
813 	return QDF_STATUS_E_FAILURE;
814 }
815 
816 /*
817  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
818  * config message to target
819  * @htt_soc:	HTT SOC handle
820  * @pdev_id:	PDEV Id
821  * @hal_srng:	Opaque HAL SRNG pointer
822  * @hal_ring_type:	SRNG ring type
823  * @ring_buf_size:	SRNG buffer size
824  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
825  * Return: 0 on success; error code on failure
826  */
827 int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng,
828 	int hal_ring_type, int ring_buf_size,
829 	struct htt_rx_ring_tlv_filter *htt_tlv_filter)
830 {
831 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
832 	struct dp_htt_htc_pkt *pkt;
833 	qdf_nbuf_t htt_msg;
834 	uint32_t *msg_word;
835 	struct hal_srng_params srng_params;
836 	uint32_t htt_ring_type, htt_ring_id;
837 	uint32_t tlv_filter;
838 
839 	htt_msg = qdf_nbuf_alloc(soc->osdev,
840 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
841 	/* reserve room for the HTC header */
842 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
843 	if (!htt_msg)
844 		goto fail0;
845 
846 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
847 
848 	switch (hal_ring_type) {
849 	case RXDMA_BUF:
850 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
851 		htt_ring_type = HTT_SW_TO_HW_RING;
852 		break;
853 	case RXDMA_MONITOR_BUF:
854 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
855 		htt_ring_type = HTT_SW_TO_HW_RING;
856 		break;
857 	case RXDMA_MONITOR_STATUS:
858 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
859 		htt_ring_type = HTT_SW_TO_HW_RING;
860 		break;
861 	case RXDMA_MONITOR_DST:
862 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
863 		htt_ring_type = HTT_HW_TO_SW_RING;
864 		break;
865 	case RXDMA_MONITOR_DESC:
866 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
867 		htt_ring_type = HTT_SW_TO_HW_RING;
868 		break;
869 	case RXDMA_DST:
870 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
871 		htt_ring_type = HTT_HW_TO_SW_RING;
872 		break;
873 
874 	default:
875 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
876 			"%s: Ring currently not supported", __func__);
877 		goto fail1;
878 	}
879 
880 	/*
881 	 * Set the length of the message.
882 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
883 	 * separately during the below call to qdf_nbuf_push_head.
884 	 * The contribution from the HTC header is added separately inside HTC.
885 	 */
886 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
887 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
888 			"%s: Failed to expand head for RX Ring Cfg msg",
889 			__func__);
890 		goto fail1; /* failure */
891 	}
892 
893 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
894 
895 	/* rewind beyond alignment pad to get to the HTC header reserved area */
896 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
897 
898 	/* word 0 */
899 	*msg_word = 0;
900 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
901 
902 	/*
903 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
904 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
905 	 */
906 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
907 			htt_ring_type == HTT_SW_TO_HW_RING)
908 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
909 						DP_SW2HW_MACID(pdev_id));
910 
911 	/* TODO: Discuss with FW on changing this to unique ID and using
912 	 * htt_ring_type to send the type of ring
913 	 */
914 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
915 
916 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
917 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
918 
919 	HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
920 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
921 
922 	HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
923 						htt_tlv_filter->offset_valid);
924 
925 	/* word 1 */
926 	msg_word++;
927 	*msg_word = 0;
928 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
929 		ring_buf_size);
930 
931 	/* word 2 */
932 	msg_word++;
933 	*msg_word = 0;
934 
935 	if (htt_tlv_filter->enable_fp) {
936 		/* TYPE: MGMT */
937 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
938 			FP, MGMT, 0000,
939 			(htt_tlv_filter->fp_mgmt_filter &
940 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
941 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
942 			FP, MGMT, 0001,
943 			(htt_tlv_filter->fp_mgmt_filter &
944 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
945 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
946 			FP, MGMT, 0010,
947 			(htt_tlv_filter->fp_mgmt_filter &
948 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
949 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
950 			FP, MGMT, 0011,
951 			(htt_tlv_filter->fp_mgmt_filter &
952 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
953 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
954 			FP, MGMT, 0100,
955 			(htt_tlv_filter->fp_mgmt_filter &
956 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
957 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
958 			FP, MGMT, 0101,
959 			(htt_tlv_filter->fp_mgmt_filter &
960 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
961 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
962 			FP, MGMT, 0110,
963 			(htt_tlv_filter->fp_mgmt_filter &
964 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
965 		/* reserved */
966 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
967 			MGMT, 0111,
968 			(htt_tlv_filter->fp_mgmt_filter &
969 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
970 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
971 			FP, MGMT, 1000,
972 			(htt_tlv_filter->fp_mgmt_filter &
973 			FILTER_MGMT_BEACON) ? 1 : 0);
974 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
975 			FP, MGMT, 1001,
976 			(htt_tlv_filter->fp_mgmt_filter &
977 			FILTER_MGMT_ATIM) ? 1 : 0);
978 	}
979 
980 	if (htt_tlv_filter->enable_md) {
981 			/* TYPE: MGMT */
982 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
983 			MD, MGMT, 0000,
984 			(htt_tlv_filter->md_mgmt_filter &
985 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
986 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
987 			MD, MGMT, 0001,
988 			(htt_tlv_filter->md_mgmt_filter &
989 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
990 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
991 			MD, MGMT, 0010,
992 			(htt_tlv_filter->md_mgmt_filter &
993 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
994 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
995 			MD, MGMT, 0011,
996 			(htt_tlv_filter->md_mgmt_filter &
997 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
998 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
999 			MD, MGMT, 0100,
1000 			(htt_tlv_filter->md_mgmt_filter &
1001 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1002 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1003 			MD, MGMT, 0101,
1004 			(htt_tlv_filter->md_mgmt_filter &
1005 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1006 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1007 			MD, MGMT, 0110,
1008 			(htt_tlv_filter->md_mgmt_filter &
1009 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1010 		/* reserved */
1011 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
1012 			MGMT, 0111,
1013 			(htt_tlv_filter->md_mgmt_filter &
1014 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1015 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1016 			MD, MGMT, 1000,
1017 			(htt_tlv_filter->md_mgmt_filter &
1018 			FILTER_MGMT_BEACON) ? 1 : 0);
1019 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1020 			MD, MGMT, 1001,
1021 			(htt_tlv_filter->md_mgmt_filter &
1022 			FILTER_MGMT_ATIM) ? 1 : 0);
1023 	}
1024 
1025 	if (htt_tlv_filter->enable_mo) {
1026 		/* TYPE: MGMT */
1027 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1028 			MO, MGMT, 0000,
1029 			(htt_tlv_filter->mo_mgmt_filter &
1030 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1031 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1032 			MO, MGMT, 0001,
1033 			(htt_tlv_filter->mo_mgmt_filter &
1034 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1035 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1036 			MO, MGMT, 0010,
1037 			(htt_tlv_filter->mo_mgmt_filter &
1038 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1039 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1040 			MO, MGMT, 0011,
1041 			(htt_tlv_filter->mo_mgmt_filter &
1042 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1043 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1044 			MO, MGMT, 0100,
1045 			(htt_tlv_filter->mo_mgmt_filter &
1046 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1047 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1048 			MO, MGMT, 0101,
1049 			(htt_tlv_filter->mo_mgmt_filter &
1050 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
1051 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1052 			MO, MGMT, 0110,
1053 			(htt_tlv_filter->mo_mgmt_filter &
1054 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1055 		/* reserved */
1056 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
1057 			MGMT, 0111,
1058 			(htt_tlv_filter->mo_mgmt_filter &
1059 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
1060 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1061 			MO, MGMT, 1000,
1062 			(htt_tlv_filter->mo_mgmt_filter &
1063 			FILTER_MGMT_BEACON) ? 1 : 0);
1064 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1065 			MO, MGMT, 1001,
1066 			(htt_tlv_filter->mo_mgmt_filter &
1067 			FILTER_MGMT_ATIM) ? 1 : 0);
1068 	}
1069 
1070 	/* word 3 */
1071 	msg_word++;
1072 	*msg_word = 0;
1073 
1074 	if (htt_tlv_filter->enable_fp) {
1075 		/* TYPE: MGMT */
1076 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1077 			FP, MGMT, 1010,
1078 			(htt_tlv_filter->fp_mgmt_filter &
1079 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1080 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1081 			FP, MGMT, 1011,
1082 			(htt_tlv_filter->fp_mgmt_filter &
1083 			FILTER_MGMT_AUTH) ? 1 : 0);
1084 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1085 			FP, MGMT, 1100,
1086 			(htt_tlv_filter->fp_mgmt_filter &
1087 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1088 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1089 			FP, MGMT, 1101,
1090 			(htt_tlv_filter->fp_mgmt_filter &
1091 			FILTER_MGMT_ACTION) ? 1 : 0);
1092 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1093 			FP, MGMT, 1110,
1094 			(htt_tlv_filter->fp_mgmt_filter &
1095 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1096 		/* reserved*/
1097 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
1098 			MGMT, 1111,
1099 			(htt_tlv_filter->fp_mgmt_filter &
1100 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1101 	}
1102 
1103 	if (htt_tlv_filter->enable_md) {
1104 			/* TYPE: MGMT */
1105 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1106 			MD, MGMT, 1010,
1107 			(htt_tlv_filter->md_mgmt_filter &
1108 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1109 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1110 			MD, MGMT, 1011,
1111 			(htt_tlv_filter->md_mgmt_filter &
1112 			FILTER_MGMT_AUTH) ? 1 : 0);
1113 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1114 			MD, MGMT, 1100,
1115 			(htt_tlv_filter->md_mgmt_filter &
1116 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1117 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1118 			MD, MGMT, 1101,
1119 			(htt_tlv_filter->md_mgmt_filter &
1120 			FILTER_MGMT_ACTION) ? 1 : 0);
1121 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1122 			MD, MGMT, 1110,
1123 			(htt_tlv_filter->md_mgmt_filter &
1124 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1125 	}
1126 
1127 	if (htt_tlv_filter->enable_mo) {
1128 		/* TYPE: MGMT */
1129 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1130 			MO, MGMT, 1010,
1131 			(htt_tlv_filter->mo_mgmt_filter &
1132 			FILTER_MGMT_DISASSOC) ? 1 : 0);
1133 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1134 			MO, MGMT, 1011,
1135 			(htt_tlv_filter->mo_mgmt_filter &
1136 			FILTER_MGMT_AUTH) ? 1 : 0);
1137 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1138 			MO, MGMT, 1100,
1139 			(htt_tlv_filter->mo_mgmt_filter &
1140 			FILTER_MGMT_DEAUTH) ? 1 : 0);
1141 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1142 			MO, MGMT, 1101,
1143 			(htt_tlv_filter->mo_mgmt_filter &
1144 			FILTER_MGMT_ACTION) ? 1 : 0);
1145 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1146 			MO, MGMT, 1110,
1147 			(htt_tlv_filter->mo_mgmt_filter &
1148 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1149 		/* reserved*/
1150 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
1151 			MGMT, 1111,
1152 			(htt_tlv_filter->mo_mgmt_filter &
1153 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
1154 	}
1155 
1156 	/* word 4 */
1157 	msg_word++;
1158 	*msg_word = 0;
1159 
1160 	if (htt_tlv_filter->enable_fp) {
1161 		/* TYPE: CTRL */
1162 		/* reserved */
1163 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1164 			CTRL, 0000,
1165 			(htt_tlv_filter->fp_ctrl_filter &
1166 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1167 		/* reserved */
1168 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1169 			CTRL, 0001,
1170 			(htt_tlv_filter->fp_ctrl_filter &
1171 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1172 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1173 			CTRL, 0010,
1174 			(htt_tlv_filter->fp_ctrl_filter &
1175 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1176 		/* reserved */
1177 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1178 			CTRL, 0011,
1179 			(htt_tlv_filter->fp_ctrl_filter &
1180 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1181 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1182 			CTRL, 0100,
1183 			(htt_tlv_filter->fp_ctrl_filter &
1184 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1185 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1186 			CTRL, 0101,
1187 			(htt_tlv_filter->fp_ctrl_filter &
1188 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1189 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1190 			CTRL, 0110,
1191 			(htt_tlv_filter->fp_ctrl_filter &
1192 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1193 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1194 			CTRL, 0111,
1195 			(htt_tlv_filter->fp_ctrl_filter &
1196 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1197 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1198 			CTRL, 1000,
1199 			(htt_tlv_filter->fp_ctrl_filter &
1200 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1201 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1202 			CTRL, 1001,
1203 			(htt_tlv_filter->fp_ctrl_filter &
1204 			FILTER_CTRL_BA) ? 1 : 0);
1205 	}
1206 
1207 	if (htt_tlv_filter->enable_md) {
1208 		/* TYPE: CTRL */
1209 		/* reserved */
1210 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1211 			CTRL, 0000,
1212 			(htt_tlv_filter->md_ctrl_filter &
1213 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1214 		/* reserved */
1215 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1216 			CTRL, 0001,
1217 			(htt_tlv_filter->md_ctrl_filter &
1218 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1219 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1220 			CTRL, 0010,
1221 			(htt_tlv_filter->md_ctrl_filter &
1222 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1223 		/* reserved */
1224 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1225 			CTRL, 0011,
1226 			(htt_tlv_filter->md_ctrl_filter &
1227 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1228 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1229 			CTRL, 0100,
1230 			(htt_tlv_filter->md_ctrl_filter &
1231 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1232 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1233 			CTRL, 0101,
1234 			(htt_tlv_filter->md_ctrl_filter &
1235 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1236 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1237 			CTRL, 0110,
1238 			(htt_tlv_filter->md_ctrl_filter &
1239 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1240 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1241 			CTRL, 0111,
1242 			(htt_tlv_filter->md_ctrl_filter &
1243 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1244 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1245 			CTRL, 1000,
1246 			(htt_tlv_filter->md_ctrl_filter &
1247 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1248 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1249 			CTRL, 1001,
1250 			(htt_tlv_filter->md_ctrl_filter &
1251 			FILTER_CTRL_BA) ? 1 : 0);
1252 	}
1253 
1254 	if (htt_tlv_filter->enable_mo) {
1255 		/* TYPE: CTRL */
1256 		/* reserved */
1257 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1258 			CTRL, 0000,
1259 			(htt_tlv_filter->mo_ctrl_filter &
1260 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1261 		/* reserved */
1262 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1263 			CTRL, 0001,
1264 			(htt_tlv_filter->mo_ctrl_filter &
1265 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1266 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1267 			CTRL, 0010,
1268 			(htt_tlv_filter->mo_ctrl_filter &
1269 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1270 		/* reserved */
1271 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1272 			CTRL, 0011,
1273 			(htt_tlv_filter->mo_ctrl_filter &
1274 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1275 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1276 			CTRL, 0100,
1277 			(htt_tlv_filter->mo_ctrl_filter &
1278 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1279 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1280 			CTRL, 0101,
1281 			(htt_tlv_filter->mo_ctrl_filter &
1282 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1283 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1284 			CTRL, 0110,
1285 			(htt_tlv_filter->mo_ctrl_filter &
1286 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1287 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1288 			CTRL, 0111,
1289 			(htt_tlv_filter->mo_ctrl_filter &
1290 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1291 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1292 			CTRL, 1000,
1293 			(htt_tlv_filter->mo_ctrl_filter &
1294 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1295 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1296 			CTRL, 1001,
1297 			(htt_tlv_filter->mo_ctrl_filter &
1298 			FILTER_CTRL_BA) ? 1 : 0);
1299 	}
1300 
1301 	/* word 5 */
1302 	msg_word++;
1303 	*msg_word = 0;
1304 	if (htt_tlv_filter->enable_fp) {
1305 		/* TYPE: CTRL */
1306 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1307 			CTRL, 1010,
1308 			(htt_tlv_filter->fp_ctrl_filter &
1309 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1310 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1311 			CTRL, 1011,
1312 			(htt_tlv_filter->fp_ctrl_filter &
1313 			FILTER_CTRL_RTS) ? 1 : 0);
1314 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1315 			CTRL, 1100,
1316 			(htt_tlv_filter->fp_ctrl_filter &
1317 			FILTER_CTRL_CTS) ? 1 : 0);
1318 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1319 			CTRL, 1101,
1320 			(htt_tlv_filter->fp_ctrl_filter &
1321 			FILTER_CTRL_ACK) ? 1 : 0);
1322 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1323 			CTRL, 1110,
1324 			(htt_tlv_filter->fp_ctrl_filter &
1325 			FILTER_CTRL_CFEND) ? 1 : 0);
1326 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1327 			CTRL, 1111,
1328 			(htt_tlv_filter->fp_ctrl_filter &
1329 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1330 		/* TYPE: DATA */
1331 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1332 			DATA, MCAST,
1333 			(htt_tlv_filter->fp_data_filter &
1334 			FILTER_DATA_MCAST) ? 1 : 0);
1335 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1336 			DATA, UCAST,
1337 			(htt_tlv_filter->fp_data_filter &
1338 			FILTER_DATA_UCAST) ? 1 : 0);
1339 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1340 			DATA, NULL,
1341 			(htt_tlv_filter->fp_data_filter &
1342 			FILTER_DATA_NULL) ? 1 : 0);
1343 	}
1344 
1345 	if (htt_tlv_filter->enable_md) {
1346 		/* TYPE: CTRL */
1347 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1348 			CTRL, 1010,
1349 			(htt_tlv_filter->md_ctrl_filter &
1350 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1351 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1352 			CTRL, 1011,
1353 			(htt_tlv_filter->md_ctrl_filter &
1354 			FILTER_CTRL_RTS) ? 1 : 0);
1355 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1356 			CTRL, 1100,
1357 			(htt_tlv_filter->md_ctrl_filter &
1358 			FILTER_CTRL_CTS) ? 1 : 0);
1359 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1360 			CTRL, 1101,
1361 			(htt_tlv_filter->md_ctrl_filter &
1362 			FILTER_CTRL_ACK) ? 1 : 0);
1363 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1364 			CTRL, 1110,
1365 			(htt_tlv_filter->md_ctrl_filter &
1366 			FILTER_CTRL_CFEND) ? 1 : 0);
1367 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1368 			CTRL, 1111,
1369 			(htt_tlv_filter->md_ctrl_filter &
1370 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1371 		/* TYPE: DATA */
1372 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1373 			DATA, MCAST,
1374 			(htt_tlv_filter->md_data_filter &
1375 			FILTER_DATA_MCAST) ? 1 : 0);
1376 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1377 			DATA, UCAST,
1378 			(htt_tlv_filter->md_data_filter &
1379 			FILTER_DATA_UCAST) ? 1 : 0);
1380 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1381 			DATA, NULL,
1382 			(htt_tlv_filter->md_data_filter &
1383 			FILTER_DATA_NULL) ? 1 : 0);
1384 	}
1385 
1386 	if (htt_tlv_filter->enable_mo) {
1387 		/* TYPE: CTRL */
1388 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1389 			CTRL, 1010,
1390 			(htt_tlv_filter->mo_ctrl_filter &
1391 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1392 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1393 			CTRL, 1011,
1394 			(htt_tlv_filter->mo_ctrl_filter &
1395 			FILTER_CTRL_RTS) ? 1 : 0);
1396 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1397 			CTRL, 1100,
1398 			(htt_tlv_filter->mo_ctrl_filter &
1399 			FILTER_CTRL_CTS) ? 1 : 0);
1400 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1401 			CTRL, 1101,
1402 			(htt_tlv_filter->mo_ctrl_filter &
1403 			FILTER_CTRL_ACK) ? 1 : 0);
1404 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1405 			CTRL, 1110,
1406 			(htt_tlv_filter->mo_ctrl_filter &
1407 			FILTER_CTRL_CFEND) ? 1 : 0);
1408 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1409 			CTRL, 1111,
1410 			(htt_tlv_filter->mo_ctrl_filter &
1411 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1412 		/* TYPE: DATA */
1413 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1414 			DATA, MCAST,
1415 			(htt_tlv_filter->mo_data_filter &
1416 			FILTER_DATA_MCAST) ? 1 : 0);
1417 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1418 			DATA, UCAST,
1419 			(htt_tlv_filter->mo_data_filter &
1420 			FILTER_DATA_UCAST) ? 1 : 0);
1421 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1422 			DATA, NULL,
1423 			(htt_tlv_filter->mo_data_filter &
1424 			FILTER_DATA_NULL) ? 1 : 0);
1425 	}
1426 
1427 	/* word 6 */
1428 	msg_word++;
1429 	*msg_word = 0;
1430 	tlv_filter = 0;
1431 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1432 		htt_tlv_filter->mpdu_start);
1433 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1434 		htt_tlv_filter->msdu_start);
1435 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1436 		htt_tlv_filter->packet);
1437 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1438 		htt_tlv_filter->msdu_end);
1439 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1440 		htt_tlv_filter->mpdu_end);
1441 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1442 		htt_tlv_filter->packet_header);
1443 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1444 		htt_tlv_filter->attention);
1445 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1446 		htt_tlv_filter->ppdu_start);
1447 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1448 		htt_tlv_filter->ppdu_end);
1449 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1450 		htt_tlv_filter->ppdu_end_user_stats);
1451 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1452 		PPDU_END_USER_STATS_EXT,
1453 		htt_tlv_filter->ppdu_end_user_stats_ext);
1454 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1455 		htt_tlv_filter->ppdu_end_status_done);
1456 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1457 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1458 		 htt_tlv_filter->header_per_msdu);
1459 
1460 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1461 
1462 	msg_word++;
1463 	*msg_word = 0;
1464 	if (htt_tlv_filter->offset_valid) {
1465 		HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
1466 					htt_tlv_filter->rx_packet_offset);
1467 		HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
1468 					htt_tlv_filter->rx_header_offset);
1469 
1470 		msg_word++;
1471 		*msg_word = 0;
1472 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
1473 					htt_tlv_filter->rx_mpdu_end_offset);
1474 		HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
1475 					htt_tlv_filter->rx_mpdu_start_offset);
1476 
1477 		msg_word++;
1478 		*msg_word = 0;
1479 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
1480 					htt_tlv_filter->rx_msdu_end_offset);
1481 		HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
1482 					htt_tlv_filter->rx_msdu_start_offset);
1483 
1484 		msg_word++;
1485 		*msg_word = 0;
1486 		HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
1487 					htt_tlv_filter->rx_attn_offset);
1488 	}
1489 
1490 	/* "response_required" field should be set if a HTT response message is
1491 	 * required after setting up the ring.
1492 	 */
1493 	pkt = htt_htc_pkt_alloc(soc);
1494 	if (!pkt)
1495 		goto fail1;
1496 
1497 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1498 
1499 	SET_HTC_PACKET_INFO_TX(
1500 		&pkt->htc_pkt,
1501 		dp_htt_h2t_send_complete_free_netbuf,
1502 		qdf_nbuf_data(htt_msg),
1503 		qdf_nbuf_len(htt_msg),
1504 		soc->htc_endpoint,
1505 		1); /* tag - not relevant here */
1506 
1507 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1508 	DP_HTT_SEND_HTC_PKT(soc, pkt);
1509 	return QDF_STATUS_SUCCESS;
1510 
1511 fail1:
1512 	qdf_nbuf_free(htt_msg);
1513 fail0:
1514 	return QDF_STATUS_E_FAILURE;
1515 }
1516 
1517 #if defined(HTT_STATS_ENABLE)
1518 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1519 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1520 
1521 {
1522 	uint32_t pdev_id;
1523 	uint32_t *msg_word = NULL;
1524 	uint32_t msg_remain_len = 0;
1525 
1526 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1527 
1528 	/*COOKIE MSB*/
1529 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1530 
1531 	/* stats message length + 16 size of HTT header*/
1532 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1533 				(uint32_t)DP_EXT_MSG_LENGTH);
1534 
1535 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1536 			msg_word,  msg_remain_len,
1537 			WDI_NO_VAL, pdev_id);
1538 
1539 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1540 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1541 	}
1542 	/* Need to be freed here as WDI handler will
1543 	 * make a copy of pkt to send data to application
1544 	 */
1545 	qdf_nbuf_free(htt_msg);
1546 	return QDF_STATUS_SUCCESS;
1547 }
1548 #else
1549 static inline QDF_STATUS
1550 dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1551 		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
1552 {
1553 	return QDF_STATUS_E_NOSUPPORT;
1554 }
1555 #endif
1556 /**
1557  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1558  * @htt_stats: htt stats info
1559  *
1560  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1561  * contains sub messages which are identified by a TLV header.
1562  * In this function we will process the stream of T2H messages and read all the
1563  * TLV contained in the message.
1564  *
1565  * THe following cases have been taken care of
1566  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1567  *		In this case the buffer will contain multiple tlvs.
1568  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1569  *		Only one tlv will be contained in the HTT message and this tag
1570  *		will extend onto the next buffer.
1571  * Case 3: When the buffer is the continuation of the previous message
1572  * Case 4: tlv length is 0. which will indicate the end of message
1573  *
1574  * return: void
1575  */
1576 static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
1577 					struct dp_soc *soc)
1578 {
1579 	htt_tlv_tag_t tlv_type = 0xff;
1580 	qdf_nbuf_t htt_msg = NULL;
1581 	uint32_t *msg_word;
1582 	uint8_t *tlv_buf_head = NULL;
1583 	uint8_t *tlv_buf_tail = NULL;
1584 	uint32_t msg_remain_len = 0;
1585 	uint32_t tlv_remain_len = 0;
1586 	uint32_t *tlv_start;
1587 	int cookie_val;
1588 	int cookie_msb;
1589 	int pdev_id;
1590 	bool copy_stats = false;
1591 	struct dp_pdev *pdev;
1592 
1593 	/* Process node in the HTT message queue */
1594 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1595 		!= NULL) {
1596 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1597 		cookie_val = *(msg_word + 1);
1598 		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
1599 					*(msg_word +
1600 					HTT_T2H_EXT_STATS_TLV_START_OFFSET));
1601 
1602 		if (cookie_val) {
1603 			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
1604 					== QDF_STATUS_SUCCESS) {
1605 				continue;
1606 			}
1607 		}
1608 
1609 		cookie_msb = *(msg_word + 2);
1610 		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1611 		pdev = soc->pdev_list[pdev_id];
1612 
1613 		if (cookie_msb >> 2) {
1614 			copy_stats = true;
1615 		}
1616 
1617 		/* read 5th word */
1618 		msg_word = msg_word + 4;
1619 		msg_remain_len = qdf_min(htt_stats->msg_len,
1620 				(uint32_t) DP_EXT_MSG_LENGTH);
1621 		/* Keep processing the node till node length is 0 */
1622 		while (msg_remain_len) {
1623 			/*
1624 			 * if message is not a continuation of previous message
1625 			 * read the tlv type and tlv length
1626 			 */
1627 			if (!tlv_buf_head) {
1628 				tlv_type = HTT_STATS_TLV_TAG_GET(
1629 						*msg_word);
1630 				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
1631 						*msg_word);
1632 			}
1633 
1634 			if (tlv_remain_len == 0) {
1635 				msg_remain_len = 0;
1636 
1637 				if (tlv_buf_head) {
1638 					qdf_mem_free(tlv_buf_head);
1639 					tlv_buf_head = NULL;
1640 					tlv_buf_tail = NULL;
1641 				}
1642 
1643 				goto error;
1644 			}
1645 
1646 			if (!tlv_buf_head)
1647 				tlv_remain_len += HTT_TLV_HDR_LEN;
1648 
1649 			if ((tlv_remain_len <= msg_remain_len)) {
1650 				/* Case 3 */
1651 				if (tlv_buf_head) {
1652 					qdf_mem_copy(tlv_buf_tail,
1653 							(uint8_t *)msg_word,
1654 							tlv_remain_len);
1655 					tlv_start = (uint32_t *)tlv_buf_head;
1656 				} else {
1657 					/* Case 1 */
1658 					tlv_start = msg_word;
1659 				}
1660 
1661 				if (copy_stats)
1662 					dp_htt_stats_copy_tag(pdev, tlv_type, tlv_start);
1663 				else
1664 					dp_htt_stats_print_tag(tlv_type, tlv_start);
1665 
1666 				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
1667 				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
1668 					dp_peer_update_inactive_time(pdev,
1669 								     tlv_type,
1670 								     tlv_start);
1671 
1672 				msg_remain_len -= tlv_remain_len;
1673 
1674 				msg_word = (uint32_t *)
1675 					(((uint8_t *)msg_word) +
1676 					tlv_remain_len);
1677 
1678 				tlv_remain_len = 0;
1679 
1680 				if (tlv_buf_head) {
1681 					qdf_mem_free(tlv_buf_head);
1682 					tlv_buf_head = NULL;
1683 					tlv_buf_tail = NULL;
1684 				}
1685 
1686 			} else { /* tlv_remain_len > msg_remain_len */
1687 				/* Case 2 & 3 */
1688 				if (!tlv_buf_head) {
1689 					tlv_buf_head = qdf_mem_malloc(
1690 							tlv_remain_len);
1691 
1692 					if (!tlv_buf_head) {
1693 						QDF_TRACE(QDF_MODULE_ID_TXRX,
1694 								QDF_TRACE_LEVEL_ERROR,
1695 								"Alloc failed");
1696 						goto error;
1697 					}
1698 
1699 					tlv_buf_tail = tlv_buf_head;
1700 				}
1701 
1702 				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
1703 						msg_remain_len);
1704 				tlv_remain_len -= msg_remain_len;
1705 				tlv_buf_tail += msg_remain_len;
1706 			}
1707 		}
1708 
1709 		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1710 			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1711 		}
1712 
1713 		qdf_nbuf_free(htt_msg);
1714 	}
1715 	return;
1716 
1717 error:
1718 	qdf_nbuf_free(htt_msg);
1719 	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1720 			!= NULL)
1721 		qdf_nbuf_free(htt_msg);
1722 }
1723 
1724 void htt_t2h_stats_handler(void *context)
1725 {
1726 	struct dp_soc *soc = (struct dp_soc *)context;
1727 	struct htt_stats_context htt_stats;
1728 	uint32_t *msg_word;
1729 	qdf_nbuf_t htt_msg = NULL;
1730 	uint8_t done;
1731 	uint8_t rem_stats;
1732 
1733 	if (!soc || !qdf_atomic_read(&soc->cmn_init_done)) {
1734 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1735 			"soc: 0x%pK, init_done: %d", soc,
1736 			qdf_atomic_read(&soc->cmn_init_done));
1737 		return;
1738 	}
1739 
1740 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
1741 	qdf_nbuf_queue_init(&htt_stats.msg);
1742 
1743 	/* pull one completed stats from soc->htt_stats_msg and process */
1744 	qdf_spin_lock_bh(&soc->htt_stats.lock);
1745 	if (!soc->htt_stats.num_stats) {
1746 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
1747 		return;
1748 	}
1749 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
1750 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1751 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
1752 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
1753 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
1754 		/*
1755 		 * Done bit signifies that this is the last T2H buffer in the
1756 		 * stream of HTT EXT STATS message
1757 		 */
1758 		if (done)
1759 			break;
1760 	}
1761 	rem_stats = --soc->htt_stats.num_stats;
1762 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
1763 
1764 	dp_process_htt_stat_msg(&htt_stats, soc);
1765 	/* If there are more stats to process, schedule stats work again */
1766 	if (rem_stats)
1767 		qdf_sched_work(0, &soc->htt_stats.work);
1768 }
1769 
1770 /*
1771  * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU,
1772  * if a new peer id arrives in a PPDU
1773  * pdev: DP pdev handle
1774  * @peer_id : peer unique identifier
1775  * @ppdu_info: per ppdu tlv structure
1776  *
1777  * return:user index to be populated
1778  */
1779 #ifdef FEATURE_PERPKT_INFO
1780 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
1781 						uint16_t peer_id,
1782 						struct ppdu_info *ppdu_info)
1783 {
1784 	uint8_t user_index = 0;
1785 	struct cdp_tx_completion_ppdu *ppdu_desc;
1786 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1787 
1788 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1789 
1790 	while ((user_index + 1) <= ppdu_info->last_user) {
1791 		ppdu_user_desc = &ppdu_desc->user[user_index];
1792 		if (ppdu_user_desc->peer_id != peer_id) {
1793 			user_index++;
1794 			continue;
1795 		} else {
1796 			/* Max users possible is 8 so user array index should
1797 			 * not exceed 7
1798 			 */
1799 			qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX);
1800 			return user_index;
1801 		}
1802 	}
1803 
1804 	ppdu_info->last_user++;
1805 	/* Max users possible is 8 so last user should not exceed 8 */
1806 	qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS);
1807 	return ppdu_info->last_user - 1;
1808 }
1809 
1810 /*
1811  * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
1812  * pdev: DP pdev handle
1813  * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
1814  * @ppdu_info: per ppdu tlv structure
1815  *
1816  * return:void
1817  */
1818 static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
1819 		uint32_t *tag_buf, struct ppdu_info *ppdu_info)
1820 {
1821 	uint16_t frame_type;
1822 	uint16_t freq;
1823 	struct dp_soc *soc = NULL;
1824 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
1825 
1826 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
1827 
1828 	tag_buf += 2;
1829 	ppdu_info->sched_cmdid =
1830 		HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
1831 	ppdu_desc->num_users =
1832 		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
1833 	tag_buf++;
1834 	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
1835 
1836 	switch (frame_type) {
1837 	case HTT_STATS_FTYPE_TIDQ_DATA_SU:
1838 	case HTT_STATS_FTYPE_TIDQ_DATA_MU:
1839 		/*
1840 		 * for management packet, frame type come as DATA_SU
1841 		 * need to check frame_ctrl before setting frame_type
1842 		 */
1843 		if (HTT_GET_FRAME_CTRL_TYPE(frame_type) <= FRAME_CTRL_TYPE_CTRL)
1844 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
1845 		else
1846 			ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
1847 	break;
1848 	case HTT_STATS_FTYPE_SGEN_MU_BAR:
1849 	case HTT_STATS_FTYPE_SGEN_BAR:
1850 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
1851 	break;
1852 	default:
1853 		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
1854 	break;
1855 	}
1856 
1857 	tag_buf += 2;
1858 	ppdu_desc->tx_duration = *tag_buf;
1859 	tag_buf += 3;
1860 	ppdu_desc->ppdu_start_timestamp = *tag_buf;
1861 
1862 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
1863 					ppdu_desc->tx_duration;
1864 	/* Ack time stamp is same as end time stamp*/
1865 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
1866 
1867 	tag_buf++;
1868 
1869 	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
1870 	if (freq != ppdu_desc->channel) {
1871 		soc = pdev->soc;
1872 		ppdu_desc->channel = freq;
1873 		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
1874 			pdev->operating_channel =
1875 		soc->cdp_soc.ol_ops->freq_to_channel(pdev->ctrl_pdev, freq);
1876 	}
1877 
1878 	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
1879 }
1880 
1881 /*
1882  * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
1883  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
1884  * @ppdu_info: per ppdu tlv structure
1885  *
1886  * return:void
1887  */
static void dp_process_ppdu_stats_user_common_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/*
	 * NOTE(review): a USER_RATE_TLV accessor is used on a USR_COMMON
	 * TLV word here; presumably both TLVs carry sw_peer_id in the same
	 * bit position — confirm against the HTT definitions.
	 */
	tag_buf++;
	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index =
		dp_get_ppdu_info_user_index(pdev,
					    peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];

	if (peer_id == DP_SCAN_PEER_ID) {
		/* scan/self peer: remember the vdev so the mac address can
		 * be recovered later (see the user_rate TLV handler)
		 */
		ppdu_desc->vdev_id =
			HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
	} else {
		/* drop the TLV if the peer is unknown or stale */
		if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
			return;
	}

	ppdu_user_desc->peer_id = peer_id;

	tag_buf++;

	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
		ppdu_user_desc->delayed_ba = 1;
	}

	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
		/* for mcast there is no BA; tried count doubles as num_mpdu */
		ppdu_user_desc->is_mcast = true;
		ppdu_user_desc->mpdu_tried_mcast =
		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
	} else {
		ppdu_user_desc->mpdu_tried_ucast =
		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
	}

	tag_buf++;

	ppdu_user_desc->qos_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
	ppdu_user_desc->frame_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;

	/* with delayed BA, zero the provisional mpdu counts here —
	 * presumably the real counts arrive with the delayed BA
	 * completion; TODO confirm against FW behavior
	 */
	if (ppdu_user_desc->delayed_ba) {
		ppdu_user_desc->mpdu_success = 0;
		ppdu_user_desc->mpdu_tried_mcast = 0;
		ppdu_user_desc->mpdu_tried_ucast = 0;
	}
}
1948 
1949 
1950 /**
1951  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
1952  * @pdev: DP pdev handle
1953  * @tag_buf: T2H message buffer carrying the user rate TLV
1954  * @ppdu_info: per ppdu tlv structure
1955  *
1956  * return:void
1957  */
static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
		uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct dp_peer *peer;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	struct dp_vdev *vdev;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/* sw peer id is in the first word after the TLV header */
	tag_buf++;
	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index =
		dp_get_ppdu_info_user_index(pdev,
					    peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	if (peer_id == DP_SCAN_PEER_ID) {
		/* scan/self peer: take the mac address from the vdev
		 * recorded in ppdu_desc->vdev_id by the user_common TLV
		 */
		vdev =
		       dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
							  ppdu_desc->vdev_id);
		if (!vdev)
			return;
		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
			     QDF_MAC_ADDR_SIZE);
	} else {
		/* regular peer: copy its mac address while holding a ref */
		peer = dp_peer_find_by_id(pdev->soc, peer_id);
		if (!peer)
			return;
		qdf_mem_copy(ppdu_user_desc->mac_addr,
			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
		dp_peer_unref_del_find_by_id(peer);
	}

	ppdu_user_desc->peer_id = peer_id;

	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);

	tag_buf += 1;

	ppdu_user_desc->user_pos =
		HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
	ppdu_user_desc->mu_group_id =
		HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);

	tag_buf += 1;

	/* RU allocation: tone count derived from start/end RU indices */
	ppdu_user_desc->ru_start =
		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
	ppdu_user_desc->ru_tones =
		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;

	tag_buf += 2;

	ppdu_user_desc->ppdu_type =
		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);

	tag_buf++;
	/* raw rate word; individual rate fields are unpacked below */
	ppdu_user_desc->tx_rate = *tag_buf;

	ppdu_user_desc->ltf_size =
		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
	ppdu_user_desc->stbc =
		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
	ppdu_user_desc->he_re =
		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
	ppdu_user_desc->txbf =
		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
	/* NOTE(review): the -2 presumably maps the HTT bw encoding onto the
	 * CDP bandwidth enum — confirm against the HTT/CDP definitions
	 */
	ppdu_user_desc->bw =
		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
	ppdu_user_desc->preamble =
		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
}
2041 
2042 /*
2043  * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
2044  * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
2045  * pdev: DP PDEV handle
2046  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
2047  * @ppdu_info: per ppdu tlv structure
2048  *
2049  * return:void
2050  */
2051 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
2052 		struct dp_pdev *pdev, uint32_t *tag_buf,
2053 		struct ppdu_info *ppdu_info)
2054 {
2055 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
2056 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
2057 
2058 	struct cdp_tx_completion_ppdu *ppdu_desc;
2059 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2060 	uint8_t curr_user_index = 0;
2061 	uint16_t peer_id;
2062 	uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
2063 
2064 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2065 
2066 	tag_buf++;
2067 
2068 	peer_id =
2069 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2070 
2071 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2072 		return;
2073 
2074 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2075 
2076 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2077 	ppdu_user_desc->peer_id = peer_id;
2078 
2079 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2080 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2081 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2082 
2083 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2084 						   (void *)ppdu_user_desc,
2085 						   ppdu_info->ppdu_id,
2086 						   size);
2087 }
2088 
2089 /*
2090  * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
2091  * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
 * pdev: DP PDEV handle
2093  * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2094  * @ppdu_info: per ppdu tlv structure
2095  *
2096  * return:void
2097  */
2098 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
2099 		struct dp_pdev *pdev, uint32_t *tag_buf,
2100 		struct ppdu_info *ppdu_info)
2101 {
2102 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
2103 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
2104 
2105 	struct cdp_tx_completion_ppdu *ppdu_desc;
2106 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2107 	uint8_t curr_user_index = 0;
2108 	uint16_t peer_id;
2109 	uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
2110 
2111 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2112 
2113 	tag_buf++;
2114 
2115 	peer_id =
2116 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2117 
2118 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2119 		return;
2120 
2121 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2122 
2123 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2124 	ppdu_user_desc->peer_id = peer_id;
2125 
2126 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2127 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
2128 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2129 
2130 	dp_process_ppdu_stats_update_failed_bitmap(pdev,
2131 						   (void *)ppdu_user_desc,
2132 						   ppdu_info->ppdu_id,
2133 						   size);
2134 }
2135 
2136 /*
2137  * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
2138  * htt_ppdu_stats_user_cmpltn_common_tlv
 * pdev: DP PDEV handle
2140  * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
2141  * @ppdu_info: per ppdu tlv structure
2142  *
2143  * return:void
2144  */
static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	/* structured view of the TLV, used for fields without GET macros */
	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	peer_id =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);

	/* ignore TLVs for unknown/stale peers */
	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
		return;

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->peer_id = peer_id;
	/* remember the most recently completed user; consulted later when
	 * deciding the expected TLV bitmap (see dp_htt_process_tlv)
	 */
	ppdu_desc->last_usr_index = curr_user_index;

	ppdu_user_desc->completion_status =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
				*tag_buf);

	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);


	tag_buf++;
	/* ack RSSI is only meaningful when the transmission completed OK */
	if (qdf_likely(ppdu_user_desc->completion_status ==
			HTT_PPDU_STATS_USER_STATUS_OK)) {
		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
		ppdu_user_desc->ack_rssi_valid = 1;
	} else {
		ppdu_user_desc->ack_rssi_valid = 0;
	}

	tag_buf++;

	ppdu_user_desc->mpdu_success =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);

	tag_buf++;

	ppdu_user_desc->long_retries =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);

	ppdu_user_desc->short_retries =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
	/* NOTE(review): retry_msdus is derived from retry counts here —
	 * presumably long+short retries approximate retried msdus; confirm
	 */
	ppdu_user_desc->retry_msdus =
		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;

	ppdu_user_desc->is_ampdu =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;

}
2207 
2208 /*
2209  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
2210  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2211  * pdev: DP PDEV handle
2212  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2213  * @ppdu_info: per ppdu tlv structure
2214  *
2215  * return:void
2216  */
2217 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
2218 		struct dp_pdev *pdev, uint32_t *tag_buf,
2219 		struct ppdu_info *ppdu_info)
2220 {
2221 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2222 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2223 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2224 	struct cdp_tx_completion_ppdu *ppdu_desc;
2225 	uint8_t curr_user_index = 0;
2226 	uint16_t peer_id;
2227 
2228 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2229 
2230 	tag_buf++;
2231 
2232 	peer_id =
2233 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2234 
2235 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2236 		return;
2237 
2238 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2239 
2240 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2241 	ppdu_user_desc->peer_id = peer_id;
2242 
2243 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2244 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2245 		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2246 }
2247 
2248 /*
2249  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
2250  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2251  * pdev: DP PDEV handle
2252  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2253  * @ppdu_info: per ppdu tlv structure
2254  *
2255  * return:void
2256  */
2257 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
2258 		struct dp_pdev *pdev, uint32_t *tag_buf,
2259 		struct ppdu_info *ppdu_info)
2260 {
2261 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2262 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2263 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2264 	struct cdp_tx_completion_ppdu *ppdu_desc;
2265 	uint8_t curr_user_index = 0;
2266 	uint16_t peer_id;
2267 
2268 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2269 
2270 	tag_buf++;
2271 
2272 	peer_id =
2273 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2274 
2275 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
2276 		return;
2277 
2278 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2279 
2280 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2281 	ppdu_user_desc->peer_id = peer_id;
2282 
2283 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2284 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
2285 		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2286 }
2287 
2288 /*
2289  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
2290  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * pdev: DP PDEV handle
2292  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2293  * @ppdu_info: per ppdu tlv structure
2294  *
2295  * return:void
2296  */
static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/* sw peer id lives two words past the TLV header in this TLV */
	tag_buf += 2;
	peer_id =
	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);

	/* ignore TLVs for unknown/stale peers */
	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
		return;

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);

	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->peer_id = peer_id;

	tag_buf++;
	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_GET(*tag_buf);
	ppdu_user_desc->num_mpdu =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);

	ppdu_user_desc->num_msdu =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);

	/* all acked msdus are counted as successful */
	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;

	tag_buf += 2;
	ppdu_user_desc->success_bytes = *tag_buf;

}
2335 
2336 /*
2337  * dp_process_ppdu_stats_user_common_array_tlv: Process
2338  * htt_ppdu_stats_user_common_array_tlv
2339  * pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_usr_common_array_tlv_v
2341  * @ppdu_info: per ppdu tlv structure
2342  *
2343  * return:void
2344  */
2345 static void dp_process_ppdu_stats_user_common_array_tlv(
2346 		struct dp_pdev *pdev, uint32_t *tag_buf,
2347 		struct ppdu_info *ppdu_info)
2348 {
2349 	uint32_t peer_id;
2350 	struct cdp_tx_completion_ppdu *ppdu_desc;
2351 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2352 	uint8_t curr_user_index = 0;
2353 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2354 
2355 	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
2356 
2357 	tag_buf++;
2358 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2359 	tag_buf += 3;
2360 	peer_id =
2361 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2362 
2363 	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
2364 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2365 			"Invalid peer");
2366 		return;
2367 	}
2368 
2369 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
2370 
2371 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2372 
2373 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2374 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2375 
2376 	tag_buf++;
2377 
2378 	ppdu_user_desc->success_msdus =
2379 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2380 	ppdu_user_desc->retry_bytes =
2381 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2382 	tag_buf++;
2383 	ppdu_user_desc->failed_msdus =
2384 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
2385 }
2386 
2387 /*
2388  * dp_process_ppdu_stats_flush_tlv: Process
2389  * htt_ppdu_stats_flush_tlv
2390  * @pdev: DP PDEV handle
2391  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
2392  *
2393  * return:void
2394  */
static void dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
						uint32_t *tag_buf)
{
	uint32_t peer_id;
	uint32_t drop_reason;
	uint8_t tid;
	uint32_t num_msdu;
	struct dp_peer *peer;

	/* word 1: reason the firmware flushed the queue */
	tag_buf++;
	drop_reason = *tag_buf;

	/* word 2: number of msdus dropped by the flush */
	tag_buf++;
	num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);

	/* word 3: sw peer id and tid share this word */
	tag_buf++;
	peer_id =
		HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);

	/* take a reference so the peer cannot be deleted mid-update */
	peer = dp_peer_find_by_id(pdev->soc, peer_id);
	if (!peer)
		return;

	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);

	/* only excess-retry flushes are accounted in per-AC peer stats */
	if (drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
		DP_STATS_INC(peer, tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
					num_msdu);
	}

	dp_peer_unref_del_find_by_id(peer);
}
2427 
2428 #ifndef WLAN_TX_PKT_CAPTURE_ENH
2429 /*
2430  * dp_deliver_mgmt_frm: Process
2431  * @pdev: DP PDEV handle
2432  * @nbuf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2433  *
2434  * return: void
2435  */
2436 static void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
2437 {
2438 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2439 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
2440 				     nbuf, HTT_INVALID_PEER,
2441 				     WDI_NO_VAL, pdev->pdev_id);
2442 	}
2443 }
2444 #endif
2445 
2446 /*
2447  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
2448  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2449  * @pdev: DP PDEV handle
2450  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2451  * @length: tlv_length
2452  *
2453  * return:QDF_STATUS_SUCCESS if nbuf as to be freed in caller
2454  */
2455 static QDF_STATUS
2456 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
2457 					      qdf_nbuf_t tag_buf,
2458 					      uint32_t ppdu_id)
2459 {
2460 	uint32_t *nbuf_ptr;
2461 	uint8_t trim_size;
2462 
2463 	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
2464 	    (!pdev->bpr_enable) && (!pdev->tx_capture_enabled))
2465 		return QDF_STATUS_SUCCESS;
2466 
2467 	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
2468 		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
2469 		      qdf_nbuf_data(tag_buf));
2470 
2471 	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
2472 		return QDF_STATUS_SUCCESS;
2473 
2474 	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
2475 			    pdev->mgmtctrl_frm_info.mgmt_buf_len);
2476 
2477 	nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(
2478 				tag_buf, sizeof(ppdu_id));
2479 	*nbuf_ptr = ppdu_id;
2480 
2481 	if (pdev->bpr_enable) {
2482 		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
2483 				     tag_buf, HTT_INVALID_PEER,
2484 				     WDI_NO_VAL, pdev->pdev_id);
2485 	}
2486 
2487 	dp_deliver_mgmt_frm(pdev, tag_buf);
2488 
2489 	return QDF_STATUS_E_ALREADY;
2490 }
2491 
2492 /**
2493  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
2494  * @pdev: DP pdev handle
2495  * @tag_buf: TLV buffer
2496  * @tlv_len: length of tlv
2497  * @ppdu_info: per ppdu tlv structure
2498  *
2499  * return: void
2500  */
static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
		uint32_t tlv_len, struct ppdu_info *ppdu_info)
{
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	/* Each case first asserts the TLV is at least as large as the host
	 * structure it is parsed into, then dispatches to the type-specific
	 * handler.
	 */
	switch (tlv_type) {
	case HTT_PPDU_STATS_COMMON_TLV:
		qdf_assert_always(tlv_len >=
				sizeof(htt_ppdu_stats_common_tlv));
		dp_process_ppdu_stats_common_tlv(pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMMON_TLV:
		qdf_assert_always(tlv_len >=
				sizeof(htt_ppdu_stats_user_common_tlv));
		dp_process_ppdu_stats_user_common_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_RATE_TLV:
		qdf_assert_always(tlv_len >=
				sizeof(htt_ppdu_stats_user_rate_tlv));
		dp_process_ppdu_stats_user_rate_tlv(pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
		qdf_assert_always(tlv_len >=
				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv));
		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
		qdf_assert_always(tlv_len >=
				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv));
		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
		qdf_assert_always(tlv_len >=
				sizeof(htt_ppdu_stats_user_cmpltn_common_tlv));
		dp_process_ppdu_stats_user_cmpltn_common_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
		qdf_assert_always(tlv_len >=
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv));
		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
		qdf_assert_always(tlv_len >=
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv));
		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
		qdf_assert_always(tlv_len >=
			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv));
		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
		qdf_assert_always(tlv_len >=
			sizeof(htt_ppdu_stats_usr_common_array_tlv_v));
		dp_process_ppdu_stats_user_common_array_tlv(
				pdev, tag_buf, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
		qdf_assert_always(tlv_len >=
			sizeof(htt_ppdu_stats_flush_tlv));
		/* flush TLV updates peer stats directly; no ppdu_info use */
		dp_process_ppdu_stats_user_compltn_flush_tlv(
				pdev, tag_buf);
		break;
	default:
		/* unknown/unhandled TLV types are silently skipped */
		break;
	}
}
2575 
2576 /**
2577  * dp_ppdu_desc_user_stats_update(): Function to update TX user stats
2578  * @pdev: DP pdev handle
2579  * @ppdu_info: per PPDU TLV descriptor
2580  *
2581  * return: void
2582  */
void
dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
			       struct ppdu_info *ppdu_info)
{
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	struct dp_peer *peer = NULL;
	uint32_t tlv_bitmap_expected;
	uint32_t tlv_bitmap_default;
	uint16_t i;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)
		qdf_nbuf_data(ppdu_info->nbuf);

	ppdu_desc->num_users = ppdu_info->last_user;
	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;

	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
		/* sniffer/m-copy mode: A-MPDUs carry a different TLV set */
		if (ppdu_info->is_ampdu)
			tlv_bitmap_expected =
				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
					ppdu_info->tlv_bitmap);
	}

	tlv_bitmap_default = tlv_bitmap_expected;
	for (i = 0; i < ppdu_desc->num_users; i++) {
		/* aggregate per-user counts into ppdu-level totals */
		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;

		peer = dp_peer_find_by_id(pdev->soc,
					  ppdu_desc->user[i].peer_id);
		/**
		 * This check is to make sure peer is not deleted
		 * after processing the TLVs.
		 */
		if (!peer)
			continue;

		ppdu_desc->user[i].cookie = (void *)peer->wlanstats_ctx;
		/* failed users only produce the low (first 8) TLV types */
		if (ppdu_desc->user[i].completion_status !=
		    HTT_PPDU_STATS_USER_STATUS_OK)
			tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;

		/* skip the stats update when expected TLVs are missing */
		if (ppdu_info->tlv_bitmap != tlv_bitmap_expected) {
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}
		/**
		 * Update tx stats for data frames having Qos as well as
		 * non-Qos data tid
		 */
		if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
		     (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID)) &&
		      (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA)) {

			dp_tx_stats_update(pdev->soc, peer,
					   &ppdu_desc->user[i],
					   ppdu_desc->ack_rssi);
			dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]);
		}

		/* release the ref taken above and restore the default
		 * expected bitmap for the next user
		 */
		dp_peer_unref_del_find_by_id(peer);
		tlv_bitmap_expected = tlv_bitmap_default;
	}
}
2648 
2649 #ifndef WLAN_TX_PKT_CAPTURE_ENH
2650 
2651 /**
2652  * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
2653  * to upper layer
2654  * @pdev: DP pdev handle
2655  * @ppdu_info: per PPDU TLV descriptor
2656  *
2657  * return: void
2658  */
2659 static
2660 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
2661 			  struct ppdu_info *ppdu_info)
2662 {
2663 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2664 	qdf_nbuf_t nbuf;
2665 
2666 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2667 		qdf_nbuf_data(ppdu_info->nbuf);
2668 
2669 	dp_ppdu_desc_user_stats_update(pdev, ppdu_info);
2670 
2671 	/*
2672 	 * Remove from the list
2673 	 */
2674 	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
2675 	nbuf = ppdu_info->nbuf;
2676 	pdev->list_depth--;
2677 	qdf_mem_free(ppdu_info);
2678 
2679 	qdf_assert_always(nbuf);
2680 
2681 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
2682 		qdf_nbuf_data(nbuf);
2683 
2684 	/**
2685 	 * Deliver PPDU stats only for valid (acked) data frames if
2686 	 * sniffer mode is not enabled.
2687 	 * If sniffer mode is enabled, PPDU stats for all frames
2688 	 * including mgmt/control frames should be delivered to upper layer
2689 	 */
2690 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2691 		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc,
2692 				nbuf, HTT_INVALID_PEER,
2693 				WDI_NO_VAL, pdev->pdev_id);
2694 	} else {
2695 		if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
2696 				ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
2697 
2698 			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
2699 					pdev->soc, nbuf, HTT_INVALID_PEER,
2700 					WDI_NO_VAL, pdev->pdev_id);
2701 		} else
2702 			qdf_nbuf_free(nbuf);
2703 	}
2704 	return;
2705 }
2706 
2707 #endif
2708 
2709 /**
2710  * dp_get_ppdu_desc(): Function to allocate new PPDU status
2711  * desc for new ppdu id
2712  * @pdev: DP pdev handle
2713  * @ppdu_id: PPDU unique identifier
2714  * @tlv_type: TLV type received
2715  *
2716  * return: ppdu_info per ppdu tlv structure
2717  */
static
struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
			uint8_t tlv_type)
{
	struct ppdu_info *ppdu_info = NULL;

	/*
	 * Find ppdu_id node exists or not
	 */
	TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) {

		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
			break;
		}
	}

	if (ppdu_info) {
		if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
			/**
			 * if we get tlv_type that is already been processed
			 * for ppdu, that means we got a new ppdu with same
			 * ppdu id. Hence Flush the older ppdu
			 * for MUMIMO and OFDMA, In a PPDU we have
			 * multiple user with same tlv types. tlv bitmap is
			 * used to check whether SU or MU_MIMO/OFDMA
			 */
			if (!(ppdu_info->tlv_bitmap &
			    (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
				return ppdu_info;

			/* id reuse after sch-cmd-status: deliver (and free)
			 * the old descriptor, then allocate a fresh one below
			 */
			dp_ppdu_desc_deliver(pdev, ppdu_info);
		} else {
			/* keep accumulating TLVs into the existing node */
			return ppdu_info;
		}
	}

	/**
	 * Flush the head ppdu descriptor if ppdu desc list reaches max
	 * threshold
	 */
	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
		dp_ppdu_desc_deliver(pdev, ppdu_info);
	}

	/*
	 * Allocate new ppdu_info node
	 */
	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
	if (!ppdu_info)
		return NULL;

	/* The descriptor itself lives in an nbuf so it can be handed to
	 * WDI consumers without copying.
	 */
	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
			sizeof(struct cdp_tx_completion_ppdu), 0, 4,
			TRUE);
	if (!ppdu_info->nbuf) {
		qdf_mem_free(ppdu_info);
		return NULL;
	}

	ppdu_info->ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf),
			sizeof(struct cdp_tx_completion_ppdu));

	if (qdf_nbuf_put_tail(ppdu_info->nbuf,
			sizeof(struct cdp_tx_completion_ppdu)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"No tailroom for HTT PPDU");
		qdf_nbuf_free(ppdu_info->nbuf);
		ppdu_info->nbuf = NULL;
		ppdu_info->last_user = 0;
		qdf_mem_free(ppdu_info);
		return NULL;
	}

	/**
	 * No lock is needed because all PPDU TLVs are processed in
	 * same context and this list is updated in same context
	 */
	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
			ppdu_info_list_elem);
	pdev->list_depth++;
	return ppdu_info;
}
2803 
2804 /**
2805  * dp_htt_process_tlv(): Function to process each PPDU TLVs
2806  * @pdev: DP pdev handle
2807  * @htt_t2h_msg: HTT target to host message
2808  *
2809  * return: ppdu_info per ppdu tlv structure
2810  */
2811 
2812 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
2813 		qdf_nbuf_t htt_t2h_msg)
2814 {
2815 	uint32_t length;
2816 	uint32_t ppdu_id;
2817 	uint8_t tlv_type;
2818 	uint32_t tlv_length, tlv_bitmap_expected;
2819 	uint8_t *tlv_buf;
2820 	struct ppdu_info *ppdu_info = NULL;
2821 	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
2822 
2823 	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2824 
2825 	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
2826 
2827 	msg_word = msg_word + 1;
2828 	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
2829 
2830 
2831 	msg_word = msg_word + 3;
2832 	while (length > 0) {
2833 		tlv_buf = (uint8_t *)msg_word;
2834 		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
2835 		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
2836 		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
2837 			pdev->stats.ppdu_stats_counter[tlv_type]++;
2838 
2839 		if (tlv_length == 0)
2840 			break;
2841 
2842 		tlv_length += HTT_TLV_HDR_LEN;
2843 
2844 		/**
2845 		 * Not allocating separate ppdu descriptor for MGMT Payload
2846 		 * TLV as this is sent as separate WDI indication and it
2847 		 * doesn't contain any ppdu information
2848 		 */
2849 		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
2850 			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
2851 			pdev->mgmtctrl_frm_info.mgmt_buf_len = tlv_length;
2852 			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
2853 			msg_word =
2854 				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2855 			length -= (tlv_length);
2856 			continue;
2857 		}
2858 
2859 		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type);
2860 		if (!ppdu_info)
2861 			return NULL;
2862 		ppdu_info->ppdu_id = ppdu_id;
2863 		ppdu_info->tlv_bitmap |= (1 << tlv_type);
2864 
2865 		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
2866 
2867 		/**
2868 		 * Increment pdev level tlv count to monitor
2869 		 * missing TLVs
2870 		 */
2871 		pdev->tlv_count++;
2872 		ppdu_info->last_tlv_cnt = pdev->tlv_count;
2873 		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
2874 		length -= (tlv_length);
2875 	}
2876 
2877 	if (!ppdu_info)
2878 		return NULL;
2879 
2880 	pdev->last_ppdu_id = ppdu_id;
2881 
2882 	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
2883 
2884 	ppdu_desc = ppdu_info->ppdu_desc;
2885 	if (ppdu_desc &&
2886 	    ppdu_desc->user[ppdu_desc->last_usr_index].completion_status !=
2887 	    HTT_PPDU_STATS_USER_STATUS_OK) {
2888 		tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
2889 	}
2890 
2891 	if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2892 		if (ppdu_info->is_ampdu)
2893 			tlv_bitmap_expected =
2894 				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
2895 					ppdu_info->tlv_bitmap);
2896 	}
2897 
2898 	/**
2899 	 * Once all the TLVs for a given PPDU has been processed,
2900 	 * return PPDU status to be delivered to higher layer
2901 	 */
2902 	if (ppdu_info->tlv_bitmap != 0 &&
2903 	    ppdu_info->tlv_bitmap == tlv_bitmap_expected)
2904 		return ppdu_info;
2905 
2906 	return NULL;
2907 }
2908 #endif /* FEATURE_PERPKT_INFO */
2909 
2910 /**
2911  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
2912  * @soc: DP SOC handle
2913  * @pdev_id: pdev id
2914  * @htt_t2h_msg: HTT message nbuf
2915  *
2916  * return:void
2917  */
2918 #if defined(WDI_EVENT_ENABLE)
2919 #ifdef FEATURE_PERPKT_INFO
2920 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
2921 				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
2922 {
2923 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
2924 	struct ppdu_info *ppdu_info = NULL;
2925 	bool free_buf = true;
2926 
2927 	if (!pdev)
2928 		return true;
2929 
2930 	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
2931 	    !pdev->mcopy_mode && !pdev->bpr_enable)
2932 		return free_buf;
2933 
2934 	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
2935 
2936 	if (pdev->mgmtctrl_frm_info.mgmt_buf) {
2937 		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
2938 		    (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
2939 		    QDF_STATUS_SUCCESS)
2940 			free_buf = false;
2941 
2942 		if (free_buf) {
2943 			pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
2944 			pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
2945 			pdev->mgmtctrl_frm_info.ppdu_id = 0;
2946 		}
2947 	}
2948 
2949 	if (ppdu_info)
2950 		dp_ppdu_desc_deliver(pdev, ppdu_info);
2951 
2952 	return free_buf;
2953 }
2954 #else
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	/*
	 * FEATURE_PERPKT_INFO disabled: nothing to parse. Returning true
	 * tells the caller it still owns (and should free) the buffer.
	 */
	return true;
}
2960 #endif
2961 #endif
2962 
2963 /**
2964  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2965  * @soc: DP SOC handle
2966  * @htt_t2h_msg: HTT message nbuf
2967  *
2968  * return:void
2969  */
2970 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
2971 		qdf_nbuf_t htt_t2h_msg)
2972 {
2973 	uint8_t done;
2974 	qdf_nbuf_t msg_copy;
2975 	uint32_t *msg_word;
2976 
2977 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2978 	msg_word = msg_word + 3;
2979 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2980 
2981 	/*
2982 	 * HTT EXT stats response comes as stream of TLVs which span over
2983 	 * multiple T2H messages.
2984 	 * The first message will carry length of the response.
2985 	 * For rest of the messages length will be zero.
2986 	 *
2987 	 * Clone the T2H message buffer and store it in a list to process
2988 	 * it later.
2989 	 *
2990 	 * The original T2H message buffers gets freed in the T2H HTT event
2991 	 * handler
2992 	 */
2993 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
2994 
2995 	if (!msg_copy) {
2996 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2997 				"T2H messge clone failed for HTT EXT STATS");
2998 		goto error;
2999 	}
3000 
3001 	qdf_spin_lock_bh(&soc->htt_stats.lock);
3002 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
3003 	/*
3004 	 * Done bit signifies that this is the last T2H buffer in the stream of
3005 	 * HTT EXT STATS message
3006 	 */
3007 	if (done) {
3008 		soc->htt_stats.num_stats++;
3009 		qdf_sched_work(0, &soc->htt_stats.work);
3010 	}
3011 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
3012 
3013 	return;
3014 
3015 error:
3016 	qdf_spin_lock_bh(&soc->htt_stats.lock);
3017 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
3018 			!= NULL) {
3019 		qdf_nbuf_free(msg_copy);
3020 	}
3021 	soc->htt_stats.num_stats = 0;
3022 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
3023 	return;
3024 
3025 }
3026 
3027 /*
3028  * htt_soc_attach_target() - SOC level HTT setup
3029  * @htt_soc:	HTT SOC handle
3030  *
3031  * Return: 0 on success; error code on failure
3032  */
int htt_soc_attach_target(void *htt_soc)
{
	/* Kick off the HTT version-request handshake with the target */
	return htt_h2t_ver_req_msg((struct htt_soc *)htt_soc);
}
3039 
3040 
3041 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
3042 /*
3043  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
3044  * @htt_soc:	 HTT SOC handle
3045  * @msg_word:    Pointer to payload
3046  * @htt_t2h_msg: HTT msg nbuf
3047  *
3048  * Return: True if buffer should be freed by caller.
3049  */
3050 static bool
3051 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
3052 				uint32_t *msg_word,
3053 				qdf_nbuf_t htt_t2h_msg)
3054 {
3055 	u_int8_t pdev_id;
3056 	bool free_buf;
3057 	qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
3058 	pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
3059 	pdev_id = DP_HW2SW_MACID(pdev_id);
3060 	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
3061 					      htt_t2h_msg);
3062 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
3063 		htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
3064 		pdev_id);
3065 	return free_buf;
3066 }
3067 #else
static bool
dp_ppdu_stats_ind_handler(struct htt_soc *soc,
				uint32_t *msg_word,
				qdf_nbuf_t htt_t2h_msg)
{
	/*
	 * WDI events or pktlog compiled out: ignore the indication and
	 * return true so the caller frees the message buffer.
	 */
	return true;
}
3075 #endif
3076 
3077 #if defined(WDI_EVENT_ENABLE) && \
3078 	!defined(REMOVE_PKT_LOG)
3079 /*
3080  * dp_pktlog_msg_handler() - Pktlog msg handler
3081  * @htt_soc:	 HTT SOC handle
3082  * @msg_word:    Pointer to payload
3083  *
3084  * Return: None
3085  */
3086 static void
3087 dp_pktlog_msg_handler(struct htt_soc *soc,
3088 		      uint32_t *msg_word)
3089 {
3090 	uint8_t pdev_id;
3091 	uint32_t *pl_hdr;
3092 
3093 	pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
3094 	pdev_id = DP_HW2SW_MACID(pdev_id);
3095 	pl_hdr = (msg_word + 1);
3096 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
3097 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
3098 		pdev_id);
3099 }
3100 #else
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
	/* pktlog via WDI is compiled out; silently drop the message */
}
3106 #endif
3107 /*
3108  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
3109  * @context:	Opaque context (HTT SOC handle)
3110  * @pkt:	HTC packet
3111  */
3112 static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
3113 {
3114 	struct htt_soc *soc = (struct htt_soc *) context;
3115 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
3116 	u_int32_t *msg_word;
3117 	enum htt_t2h_msg_type msg_type;
3118 	bool free_buf = true;
3119 
3120 	/* check for successful message reception */
3121 	if (pkt->Status != QDF_STATUS_SUCCESS) {
3122 		if (pkt->Status != QDF_STATUS_E_CANCELED)
3123 			soc->stats.htc_err_cnt++;
3124 
3125 		qdf_nbuf_free(htt_t2h_msg);
3126 		return;
3127 	}
3128 
3129 	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
3130 
3131 	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
3132 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3133 	switch (msg_type) {
3134 	case HTT_T2H_MSG_TYPE_PEER_MAP:
3135 		{
3136 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3137 			u_int8_t *peer_mac_addr;
3138 			u_int16_t peer_id;
3139 			u_int16_t hw_peer_id;
3140 			u_int8_t vdev_id;
3141 			u_int8_t is_wds;
3142 			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
3143 
3144 			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
3145 			hw_peer_id =
3146 				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
3147 			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
3148 			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
3149 				(u_int8_t *) (msg_word+1),
3150 				&mac_addr_deswizzle_buf[0]);
3151 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3152 				QDF_TRACE_LEVEL_INFO,
3153 				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3154 				peer_id, vdev_id);
3155 
3156 			/*
3157 			 * check if peer already exists for this peer_id, if so
3158 			 * this peer map event is in response for a wds peer add
3159 			 * wmi command sent during wds source port learning.
3160 			 * in this case just add the ast entry to the existing
3161 			 * peer ast_list.
3162 			 */
3163 			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
3164 			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
3165 					       vdev_id, peer_mac_addr, 0,
3166 					       is_wds);
3167 			break;
3168 		}
3169 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
3170 		{
3171 			u_int16_t peer_id;
3172 			u_int8_t vdev_id;
3173 			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
3174 			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
3175 			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
3176 
3177 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3178 						 vdev_id, mac_addr, 0);
3179 			break;
3180 		}
3181 	case HTT_T2H_MSG_TYPE_SEC_IND:
3182 		{
3183 			u_int16_t peer_id;
3184 			enum cdp_sec_type sec_type;
3185 			int is_unicast;
3186 
3187 			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
3188 			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
3189 			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
3190 			/* point to the first part of the Michael key */
3191 			msg_word++;
3192 			dp_rx_sec_ind_handler(
3193 				soc->dp_soc, peer_id, sec_type, is_unicast,
3194 				msg_word, msg_word + 2);
3195 			break;
3196 		}
3197 
3198 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
3199 		{
3200 			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
3201 							     htt_t2h_msg);
3202 			break;
3203 		}
3204 
3205 	case HTT_T2H_MSG_TYPE_PKTLOG:
3206 		{
3207 			dp_pktlog_msg_handler(soc, msg_word);
3208 			break;
3209 		}
3210 
3211 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
3212 		{
3213 			htc_pm_runtime_put(soc->htc_soc);
3214 			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
3215 			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
3216 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
3217 				"target uses HTT version %d.%d; host uses %d.%d",
3218 				soc->tgt_ver.major, soc->tgt_ver.minor,
3219 				HTT_CURRENT_VERSION_MAJOR,
3220 				HTT_CURRENT_VERSION_MINOR);
3221 			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
3222 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3223 					QDF_TRACE_LEVEL_ERROR,
3224 					"*** Incompatible host/target HTT versions!");
3225 			}
3226 			/* abort if the target is incompatible with the host */
3227 			qdf_assert(soc->tgt_ver.major ==
3228 				HTT_CURRENT_VERSION_MAJOR);
3229 			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
3230 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3231 					QDF_TRACE_LEVEL_WARN,
3232 					"*** Warning: host/target HTT versions"
3233 					" are different, though compatible!");
3234 			}
3235 			break;
3236 		}
3237 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
3238 		{
3239 			uint16_t peer_id;
3240 			uint8_t tid;
3241 			uint8_t win_sz;
3242 			uint16_t status;
3243 			struct dp_peer *peer;
3244 
3245 			/*
3246 			 * Update REO Queue Desc with new values
3247 			 */
3248 			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
3249 			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
3250 			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
3251 			peer = dp_peer_find_by_id(soc->dp_soc, peer_id);
3252 
3253 			/*
3254 			 * Window size needs to be incremented by 1
3255 			 * since fw needs to represent a value of 256
3256 			 * using just 8 bits
3257 			 */
3258 			if (peer) {
3259 				status = dp_addba_requestprocess_wifi3(peer,
3260 						0, tid, 0, win_sz + 1, 0xffff);
3261 
3262 				/*
3263 				 * If PEER_LOCK_REF_PROTECT enbled dec ref
3264 				 * which is inc by dp_peer_find_by_id
3265 				 */
3266 				dp_peer_unref_del_find_by_id(peer);
3267 
3268 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3269 					QDF_TRACE_LEVEL_INFO,
3270 					FL("PeerID %d BAW %d TID %d stat %d"),
3271 					peer_id, win_sz, tid, status);
3272 
3273 			} else {
3274 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3275 					QDF_TRACE_LEVEL_ERROR,
3276 					FL("Peer not found peer id %d"),
3277 					peer_id);
3278 			}
3279 			break;
3280 		}
3281 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
3282 		{
3283 			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
3284 			break;
3285 		}
3286 	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
3287 		{
3288 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3289 			u_int8_t *peer_mac_addr;
3290 			u_int16_t peer_id;
3291 			u_int16_t hw_peer_id;
3292 			u_int8_t vdev_id;
3293 			bool is_wds;
3294 			u_int16_t ast_hash;
3295 
3296 			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
3297 			hw_peer_id =
3298 			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
3299 			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
3300 			peer_mac_addr =
3301 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3302 						   &mac_addr_deswizzle_buf[0]);
3303 			is_wds =
3304 			HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
3305 			ast_hash =
3306 			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
3307 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3308 				  QDF_TRACE_LEVEL_INFO,
3309 				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
3310 				  peer_id, vdev_id);
3311 
3312 			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
3313 					       hw_peer_id, vdev_id,
3314 					       peer_mac_addr, ast_hash,
3315 					       is_wds);
3316 			break;
3317 		}
3318 	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
3319 		{
3320 			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
3321 			u_int8_t *mac_addr;
3322 			u_int16_t peer_id;
3323 			u_int8_t vdev_id;
3324 			u_int8_t is_wds;
3325 
3326 			peer_id =
3327 			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
3328 			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
3329 			mac_addr =
3330 			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
3331 						   &mac_addr_deswizzle_buf[0]);
3332 			is_wds =
3333 			HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
3334 			QDF_TRACE(QDF_MODULE_ID_TXRX,
3335 				  QDF_TRACE_LEVEL_INFO,
3336 				  "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
3337 				  peer_id, vdev_id);
3338 
3339 			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
3340 						 vdev_id, mac_addr,
3341 						 is_wds);
3342 			break;
3343 		}
3344 	default:
3345 		break;
3346 	};
3347 
3348 	/* Free the indication buffer */
3349 	if (free_buf)
3350 		qdf_nbuf_free(htt_t2h_msg);
3351 }
3352 
3353 /*
3354  * dp_htt_h2t_full() - Send full handler (called from HTC)
3355  * @context:	Opaque context (HTT SOC handle)
3356  * @pkt:	HTC packet
3357  *
3358  * Return: enum htc_send_full_action
3359  */
static enum htc_send_full_action
dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
{
	/* Never drop H2T messages on queue-full; keep them queued */
	return HTC_SEND_FULL_KEEP;
}
3365 
3366 /*
3367  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
3368  * @context:	Opaque context (HTT SOC handle)
3369  * @nbuf:	nbuf containing T2H message
3370  * @pipe_id:	HIF pipe ID
3371  *
3372  * Return: QDF_STATUS
3373  *
3374  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
3375  * will be used for packet log and other high-priority HTT messages. Proper
3376  * HTC connection to be added later once required FW changes are available
3377  */
3378 static QDF_STATUS
3379 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
3380 {
3381 	QDF_STATUS rc = QDF_STATUS_SUCCESS;
3382 	HTC_PACKET htc_pkt;
3383 
3384 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
3385 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
3386 	htc_pkt.Status = QDF_STATUS_SUCCESS;
3387 	htc_pkt.pPktContext = (void *)nbuf;
3388 	dp_htt_t2h_msg_handler(context, &htc_pkt);
3389 
3390 	return rc;
3391 }
3392 
3393 /*
3394  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
3395  * @htt_soc:	HTT SOC handle
3396  *
3397  * Return: QDF_STATUS
3398  */
3399 static QDF_STATUS
3400 htt_htc_soc_attach(struct htt_soc *soc)
3401 {
3402 	struct htc_service_connect_req connect;
3403 	struct htc_service_connect_resp response;
3404 	QDF_STATUS status;
3405 	struct dp_soc *dpsoc = soc->dp_soc;
3406 
3407 	qdf_mem_zero(&connect, sizeof(connect));
3408 	qdf_mem_zero(&response, sizeof(response));
3409 
3410 	connect.pMetaData = NULL;
3411 	connect.MetaDataLength = 0;
3412 	connect.EpCallbacks.pContext = soc;
3413 	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
3414 	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
3415 	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
3416 
3417 	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
3418 	connect.EpCallbacks.EpRecvRefill = NULL;
3419 
3420 	/* N/A, fill is done by HIF */
3421 	connect.EpCallbacks.RecvRefillWaterMark = 1;
3422 
3423 	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
3424 	/*
3425 	 * Specify how deep to let a queue get before htc_send_pkt will
3426 	 * call the EpSendFull function due to excessive send queue depth.
3427 	 */
3428 	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
3429 
3430 	/* disable flow control for HTT data message service */
3431 	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
3432 
3433 	/* connect to control service */
3434 	connect.service_id = HTT_DATA_MSG_SVC;
3435 
3436 	status = htc_connect_service(soc->htc_soc, &connect, &response);
3437 
3438 	if (status != QDF_STATUS_SUCCESS)
3439 		return status;
3440 
3441 	soc->htc_endpoint = response.Endpoint;
3442 
3443 	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
3444 	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
3445 		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
3446 
3447 	return QDF_STATUS_SUCCESS; /* success */
3448 }
3449 
3450 /*
3451  * htt_soc_initialize() - SOC level HTT initialization
3452  * @htt_soc: Opaque htt SOC handle
3453  * @ctrl_psoc: Opaque ctrl SOC handle
3454  * @htc_soc: SOC level HTC handle
3455  * @hal_soc: Opaque HAL SOC handle
3456  * @osdev: QDF device
3457  *
3458  * Return: HTT handle on success; NULL on failure
3459  */
3460 void *
3461 htt_soc_initialize(void *htt_soc, void *ctrl_psoc, HTC_HANDLE htc_soc,
3462 		   void *hal_soc, qdf_device_t osdev)
3463 {
3464 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
3465 
3466 	soc->osdev = osdev;
3467 	soc->ctrl_psoc = ctrl_psoc;
3468 	soc->htc_soc = htc_soc;
3469 	soc->hal_soc = hal_soc;
3470 
3471 	if (htt_htc_soc_attach(soc))
3472 		goto fail2;
3473 
3474 	return soc;
3475 
3476 fail2:
3477 	return NULL;
3478 }
3479 
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	/* Release both the misc pkt list and the preallocated pkt pool */
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}
3485 
3486 /*
3487  * htt_soc_htc_prealloc() - HTC memory prealloc
3488  * @htt_soc: SOC level HTT handle
3489  *
3490  * Return: QDF_STATUS_SUCCESS on Success or
3491  * QDF_STATUS_E_NOMEM on allocation failure
3492  */
3493 QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
3494 {
3495 	int i;
3496 
3497 	HTT_TX_MUTEX_INIT(&soc->htt_tx_mutex);
3498 
3499 	soc->htt_htc_pkt_freelist = NULL;
3500 	/* pre-allocate some HTC_PACKET objects */
3501 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
3502 		struct dp_htt_htc_pkt_union *pkt;
3503 		pkt = qdf_mem_malloc(sizeof(*pkt));
3504 		if (!pkt)
3505 			return QDF_STATUS_E_NOMEM;
3506 
3507 		htt_htc_pkt_free(soc, &pkt->u.pkt);
3508 	}
3509 	return QDF_STATUS_SUCCESS;
3510 }
3511 
3512 /*
3513  * htt_soc_detach() - Free SOC level HTT handle
3514  * @htt_hdl: HTT SOC handle
3515  */
3516 void htt_soc_detach(void *htt_hdl)
3517 {
3518 	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
3519 
3520 	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
3521 	qdf_mem_free(htt_handle);
3522 }
3523 
3524 /**
 * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW
3526  * @pdev: DP PDEV handle
3527  * @stats_type_upload_mask: stats type requested by user
3528  * @config_param_0: extra configuration parameters
3529  * @config_param_1: extra configuration parameters
3530  * @config_param_2: extra configuration parameters
3531  * @config_param_3: extra configuration parameters
3532  * @mac_id: mac number
3533  *
3534  * return: QDF STATUS
3535  */
3536 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
3537 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
3538 		uint32_t config_param_1, uint32_t config_param_2,
3539 		uint32_t config_param_3, int cookie_val, int cookie_msb,
3540 		uint8_t mac_id)
3541 {
3542 	struct htt_soc *soc = pdev->soc->htt_handle;
3543 	struct dp_htt_htc_pkt *pkt;
3544 	qdf_nbuf_t msg;
3545 	uint32_t *msg_word;
3546 	uint8_t pdev_mask = 0;
3547 
3548 	msg = qdf_nbuf_alloc(
3549 			soc->osdev,
3550 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
3551 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
3552 
3553 	if (!msg)
3554 		return QDF_STATUS_E_NOMEM;
3555 
3556 	/*TODO:Add support for SOC stats
3557 	 * Bit 0: SOC Stats
3558 	 * Bit 1: Pdev stats for pdev id 0
3559 	 * Bit 2: Pdev stats for pdev id 1
3560 	 * Bit 3: Pdev stats for pdev id 2
3561 	 */
3562 	mac_id = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
3563 
3564 	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
3565 	/*
3566 	 * Set the length of the message.
3567 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
3568 	 * separately during the below call to qdf_nbuf_push_head.
3569 	 * The contribution from the HTC header is added separately inside HTC.
3570 	 */
3571 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
3572 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3573 				"Failed to expand head for HTT_EXT_STATS");
3574 		qdf_nbuf_free(msg);
3575 		return QDF_STATUS_E_FAILURE;
3576 	}
3577 
3578 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3579 		"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
3580 		"config_param_1 %u\n config_param_2 %u\n"
3581 		"config_param_4 %u\n -------------",
3582 		__func__, __LINE__, cookie_val, config_param_0,
3583 		config_param_1, config_param_2,	config_param_3);
3584 
3585 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
3586 
3587 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
3588 	*msg_word = 0;
3589 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
3590 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
3591 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
3592 
3593 	/* word 1 */
3594 	msg_word++;
3595 	*msg_word = 0;
3596 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
3597 
3598 	/* word 2 */
3599 	msg_word++;
3600 	*msg_word = 0;
3601 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
3602 
3603 	/* word 3 */
3604 	msg_word++;
3605 	*msg_word = 0;
3606 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
3607 
3608 	/* word 4 */
3609 	msg_word++;
3610 	*msg_word = 0;
3611 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
3612 
3613 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
3614 
3615 	/* word 5 */
3616 	msg_word++;
3617 
3618 	/* word 6 */
3619 	msg_word++;
3620 	*msg_word = 0;
3621 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
3622 
3623 	/* word 7 */
3624 	msg_word++;
3625 	*msg_word = 0;
3626 	/*Using last 2 bits for pdev_id */
3627 	cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
3628 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
3629 
3630 	pkt = htt_htc_pkt_alloc(soc);
3631 	if (!pkt) {
3632 		qdf_nbuf_free(msg);
3633 		return QDF_STATUS_E_NOMEM;
3634 	}
3635 
3636 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3637 
3638 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
3639 			dp_htt_h2t_send_complete_free_netbuf,
3640 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
3641 			soc->htc_endpoint,
3642 			1); /* tag - not relevant here */
3643 
3644 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3645 	DP_HTT_SEND_HTC_PKT(soc, pkt);
3646 	return 0;
3647 }
3648 
/* Remove this workaround once a proper definition for
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG is available in htt.h
 */
3652 #if defined(WDI_EVENT_ENABLE)
3653 /**
3654  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
3655  * @pdev: DP PDEV handle
3656  * @stats_type_upload_mask: stats type requested by user
3657  * @mac_id: Mac id number
3658  *
3659  * return: QDF STATUS
3660  */
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask;

	/* reserve headroom for the HTC header plus alignment padding */
	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer");
		qdf_assert(0);
		return QDF_STATUS_E_NOMEM;
	}

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Failed to expand head for HTT_CFG_STATS");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	/* single-word message: type, pdev mask, requested TLV bitmask */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
			stats_type_upload_mask);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			soc->htc_endpoint,
			1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	DP_HTT_SEND_HTC_PKT(soc, pkt);
	return 0;
}
3733 #endif
3734 
3735 void
3736 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
3737 			     uint32_t *tag_buf)
3738 {
3739 	switch (tag_type) {
3740 	case HTT_STATS_PEER_DETAILS_TAG:
3741 	{
3742 		htt_peer_details_tlv *dp_stats_buf =
3743 			(htt_peer_details_tlv *)tag_buf;
3744 
3745 		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
3746 	}
3747 	break;
3748 	case HTT_STATS_PEER_STATS_CMN_TAG:
3749 	{
3750 		htt_peer_stats_cmn_tlv *dp_stats_buf =
3751 			(htt_peer_stats_cmn_tlv *)tag_buf;
3752 
3753 		struct dp_peer *peer = dp_peer_find_by_id(pdev->soc,
3754 						pdev->fw_stats_peer_id);
3755 
3756 		if (peer && !peer->bss_peer) {
3757 			peer->stats.tx.inactive_time =
3758 				dp_stats_buf->inactive_time;
3759 			qdf_event_set(&pdev->fw_peer_stats_event);
3760 		}
3761 		if (peer)
3762 			dp_peer_unref_del_find_by_id(peer);
3763 	}
3764 	break;
3765 	default:
3766 		qdf_err("Invalid tag_type");
3767 	}
3768 }
3769