xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c (revision 3149adf58a329e17232a4c0e58d460d025edd55a)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <htt.h>
20 #include <hal_api.h>
21 #include "dp_htt.h"
22 #include "dp_peer.h"
23 #include "dp_types.h"
24 #include "dp_internal.h"
25 #include "dp_rx_mon.h"
26 #include "htt_stats.h"
27 #include "htt_ppdu_stats.h"
28 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
29 #include "cdp_txrx_cmn_struct.h"
30 
31 #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
32 
33 #define HTT_HTC_PKT_POOL_INIT_SIZE 64
34 #define HTT_T2H_MAX_MSG_SIZE 2048
35 
36 #define HTT_MSG_BUF_SIZE(msg_bytes) \
37 	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
38 
39 #define HTT_PID_BIT_MASK 0x3
40 
41 #define DP_EXT_MSG_LENGTH 2048
42 #define DP_HTT_SEND_HTC_PKT(soc, pkt)                            \
43 do {                                                             \
44 	if (htc_send_pkt(soc->htc_soc, &pkt->htc_pkt) ==         \
45 					QDF_STATUS_SUCCESS)      \
46 		htt_htc_misc_pkt_list_add(soc, pkt);             \
47 } while (0)
48 
49 #define HTT_MGMT_CTRL_TLV_RESERVERD_LEN 12
50 
51 /*
52  * dp_tx_stats_update() - Update per-peer statistics
53  * @soc: Datapath soc handle
54  * @peer: Datapath peer handle
55  * @ppdu: PPDU Descriptor
56  * @ack_rssi: RSSI of last ack received
57  *
58  * Return: None
59  */
#ifdef FEATURE_PERPKT_INFO
/*
 * dp_tx_stats_update() - Update per-peer statistics
 * @soc: Datapath soc handle
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor (per-user completion info for one PPDU)
 * @ack_rssi: RSSI of last ack received
 *
 * Folds one PPDU completion's per-user counters (MSDU counts, byte
 * counts, retries, rate/NSS/BW/GI histograms) into the peer's stats,
 * then pushes the updated stats to the control plane via the
 * update_dp_stats callback when one is registered.
 *
 * Return: None
 */
static void dp_tx_stats_update(struct dp_soc *soc, struct dp_peer *peer,
		struct cdp_tx_completion_ppdu_user *ppdu, uint32_t ack_rssi)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	uint8_t preamble, mcs;
	uint16_t num_msdu;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;
	num_msdu = ppdu->num_msdu;

	/* If the peer statistics are already processed as part of
	 * per-MSDU completion handler, do not process these again in per-PPDU
	 * indications */
	if (soc->process_tx_status)
		return;

	/* comp_pkt counts every MSDU of the PPDU and all bytes, whether
	 * they succeeded, were retried, or failed */
	DP_STATS_INC_PKT(peer, tx.comp_pkt,
			num_msdu, (ppdu->success_bytes +
				ppdu->retry_bytes + ppdu->failed_bytes));
	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], num_msdu);
	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
	/* ack RSSI is only meaningful for unicast (acked) traffic */
	if (!(ppdu->is_mcast))
		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);

	DP_STATS_INC(peer, tx.retries,
			(ppdu->long_retries + ppdu->short_retries));
	/* Per-preamble MCS histogram: for each preamble type, an in-range
	 * MCS increments its own bucket and an out-of-range MCS is folded
	 * into the last bucket (MAX_MCS-1). Exactly one INCC of each pair
	 * fires per preamble. */
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
			((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
			tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));

	/* ship the refreshed peer stats to the control plane, then
	 * re-aggregate the owning vdev's rollup stats */
	if (soc->cdp_soc.ol_ops->update_dp_stats) {
		soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev,
				&peer->stats, ppdu->peer_id,
				UPDATE_PEER_STATS);

		dp_aggregate_vdev_stats(peer->vdev);
	}
}
#endif
134 
135 /*
136  * htt_htc_pkt_alloc() - Allocate HTC packet buffer
137  * @htt_soc:	HTT SOC handle
138  *
139  * Return: Pointer to htc packet buffer
140  */
141 static struct dp_htt_htc_pkt *
142 htt_htc_pkt_alloc(struct htt_soc *soc)
143 {
144 	struct dp_htt_htc_pkt_union *pkt = NULL;
145 
146 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
147 	if (soc->htt_htc_pkt_freelist) {
148 		pkt = soc->htt_htc_pkt_freelist;
149 		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
150 	}
151 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
152 
153 	if (pkt == NULL)
154 		pkt = qdf_mem_malloc(sizeof(*pkt));
155 	return &pkt->u.pkt; /* not actually a dereference */
156 }
157 
158 /*
159  * htt_htc_pkt_free() - Free HTC packet buffer
160  * @htt_soc:	HTT SOC handle
161  */
162 static void
163 htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
164 {
165 	struct dp_htt_htc_pkt_union *u_pkt =
166 		(struct dp_htt_htc_pkt_union *)pkt;
167 
168 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
169 	u_pkt->u.next = soc->htt_htc_pkt_freelist;
170 	soc->htt_htc_pkt_freelist = u_pkt;
171 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
172 }
173 
174 /*
175  * htt_htc_pkt_pool_free() - Free HTC packet pool
176  * @htt_soc:	HTT SOC handle
177  */
178 static void
179 htt_htc_pkt_pool_free(struct htt_soc *soc)
180 {
181 	struct dp_htt_htc_pkt_union *pkt, *next;
182 	pkt = soc->htt_htc_pkt_freelist;
183 	while (pkt) {
184 		next = pkt->u.next;
185 		qdf_mem_free(pkt);
186 		pkt = next;
187 	}
188 	soc->htt_htc_pkt_freelist = NULL;
189 }
190 
191 /*
192  * htt_htc_misc_pkt_list_trim() - trim misc list
193  * @htt_soc: HTT SOC handle
194  * @level: max no. of pkts in list
195  */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* Free every node past position 'level'. When the first
		 * excess node is freed, its predecessor ('prev') terminates
		 * the kept portion of the list; on later iterations prev is
		 * NULL (pkt was cleared below), so only the tail nodes keep
		 * being released - the kept list is already sealed. */
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			/* release the DMA mapping before freeing the nbuf */
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			pkt = NULL;
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
223 
224 /*
225  * htt_htc_misc_pkt_list_add() - Add pkt to misc list
226  * @htt_soc:	HTT SOC handle
227  * @dp_htt_htc_pkt: pkt to be added to list
228  */
229 static void
230 htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
231 {
232 	struct dp_htt_htc_pkt_union *u_pkt =
233 				(struct dp_htt_htc_pkt_union *)pkt;
234 	int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
235 							pkt->htc_pkt.Endpoint)
236 				+ DP_HTT_HTC_PKT_MISCLIST_SIZE;
237 
238 	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
239 	if (soc->htt_htc_pkt_misclist) {
240 		u_pkt->u.next = soc->htt_htc_pkt_misclist;
241 		soc->htt_htc_pkt_misclist = u_pkt;
242 	} else {
243 		soc->htt_htc_pkt_misclist = u_pkt;
244 	}
245 	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
246 
247 	/* only ce pipe size + tx_queue_depth could possibly be in use
248 	 * free older packets in the misclist
249 	 */
250 	htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
251 }
252 
253 /*
254  * htt_htc_misc_pkt_pool_free() - free pkts in misc list
255  * @htt_soc:	HTT SOC handle
256  */
257 static void
258 htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
259 {
260 	struct dp_htt_htc_pkt_union *pkt, *next;
261 	qdf_nbuf_t netbuf;
262 
263 	pkt = soc->htt_htc_pkt_misclist;
264 
265 	while (pkt) {
266 		next = pkt->u.next;
267 		netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
268 		qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
269 
270 		soc->stats.htc_pkt_free++;
271 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
272 			 "%s: Pkt free count %d\n",
273 			 __func__, soc->stats.htc_pkt_free);
274 
275 		qdf_nbuf_free(netbuf);
276 		qdf_mem_free(pkt);
277 		pkt = next;
278 	}
279 	soc->htt_htc_pkt_misclist = NULL;
280 }
281 
282 /*
283  * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianess differ
284  * @tgt_mac_addr:	Target MAC
285  * @buffer:		Output buffer
286  */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The host endianness is opposite of the target endianness.
	 * To make u_int32_t elements come out correctly, the target->host
	 * upload has swizzled the bytes in each u_int32_t element of the
	 * message.
	 * For byte-array message fields like the MAC address, this
	 * upload swizzling puts the bytes in the wrong order, and needs
	 * to be undone: within each 4-byte word the byte order is
	 * reversed, i.e. output byte i comes from input byte (i ^ 3).
	 */
	int i;

	for (i = 0; i < 6; i++)
		buffer[i] = tgt_mac_addr[i ^ 3];
	return buffer;
#else
	/*
	 * The host endianness matches the target endianness -
	 * we can use the mac addr directly from the message buffer.
	 */
	return tgt_mac_addr;
#endif
}
315 
316 /*
317  * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
318  * @soc:	SOC handle
319  * @status:	Completion status
320  * @netbuf:	HTT buffer
321  */
/* Default send-done callback: just release the message nbuf.
 * 'soc' and 'status' are unused; the signature matches the
 * send_complete_part2 callback type invoked from
 * dp_htt_h2t_send_complete(). */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}
328 
329 /*
330  * dp_htt_h2t_send_complete() - H2T completion handler
331  * @context:	Opaque context (HTT SOC handle)
332  * @htc_pkt:	HTC packet
333  */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	void (*send_complete_part2)(
		void *soc, A_STATUS status, qdf_nbuf_t msdu);
	struct htt_soc *soc =  (struct htt_soc *) context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	/* per-message netbuf handler stashed at send time in pPktContext */
	send_complete_part2 = htc_pkt->pPktContext;

	/* recover our wrapper from the embedded HTC_PACKET */
	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	 */
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	if (send_complete_part2 != NULL) {
		send_complete_part2(
			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
	}
	/* free the htt_htc_pkt / HTC_PACKET object */
	htt_htc_pkt_free(soc, htt_pkt);
}
360 
361 /*
362  * htt_h2t_ver_req_msg() - Send HTT version request message to target
363  * @htt_soc:	HTT SOC handle
364  *
365  * Return: 0 on success; error code on failure
366  */
367 static int htt_h2t_ver_req_msg(struct htt_soc *soc)
368 {
369 	struct dp_htt_htc_pkt *pkt;
370 	qdf_nbuf_t msg;
371 	uint32_t *msg_word;
372 
373 	msg = qdf_nbuf_alloc(
374 		soc->osdev,
375 		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
376 		/* reserve room for the HTC header */
377 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
378 	if (!msg)
379 		return QDF_STATUS_E_NOMEM;
380 
381 	/*
382 	 * Set the length of the message.
383 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
384 	 * separately during the below call to qdf_nbuf_push_head.
385 	 * The contribution from the HTC header is added separately inside HTC.
386 	 */
387 	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
388 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
389 			"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg\n",
390 			__func__);
391 		return QDF_STATUS_E_FAILURE;
392 	}
393 
394 	/* fill in the message contents */
395 	msg_word = (u_int32_t *) qdf_nbuf_data(msg);
396 
397 	/* rewind beyond alignment pad to get to the HTC header reserved area */
398 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
399 
400 	*msg_word = 0;
401 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
402 
403 	pkt = htt_htc_pkt_alloc(soc);
404 	if (!pkt) {
405 		qdf_nbuf_free(msg);
406 		return QDF_STATUS_E_FAILURE;
407 	}
408 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
409 
410 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
411 		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
412 		qdf_nbuf_len(msg), soc->htc_endpoint,
413 		1); /* tag - not relevant here */
414 
415 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
416 	DP_HTT_SEND_HTC_PKT(soc, pkt);
417 	return 0;
418 }
419 
420 /*
421  * htt_srng_setup() - Send SRNG setup message to target
422  * @htt_soc:	HTT SOC handle
423  * @mac_id:	MAC Id
424  * @hal_srng:	Opaque HAL SRNG pointer
425  * @hal_ring_type:	SRNG ring type
426  *
427  * Return: 0 on success; error code on failure
428  */
429 int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,
430 	int hal_ring_type)
431 {
432 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
433 	struct dp_htt_htc_pkt *pkt;
434 	qdf_nbuf_t htt_msg;
435 	uint32_t *msg_word;
436 	struct hal_srng_params srng_params;
437 	qdf_dma_addr_t hp_addr, tp_addr;
438 	uint32_t ring_entry_size =
439 		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
440 	int htt_ring_type, htt_ring_id;
441 
442 	/* Sizes should be set in 4-byte words */
443 	ring_entry_size = ring_entry_size >> 2;
444 
445 	htt_msg = qdf_nbuf_alloc(soc->osdev,
446 		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
447 		/* reserve room for the HTC header */
448 		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
449 	if (!htt_msg)
450 		goto fail0;
451 
452 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
453 	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_srng);
454 	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_srng);
455 
456 	switch (hal_ring_type) {
457 	case RXDMA_BUF:
458 #ifdef QCA_HOST2FW_RXBUF_RING
459 		if (srng_params.ring_id ==
460 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
461 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
462 			htt_ring_type = HTT_SW_TO_SW_RING;
463 #ifdef IPA_OFFLOAD
464 		} else if (srng_params.ring_id ==
465 		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
466 			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
467 			htt_ring_type = HTT_SW_TO_SW_RING;
468 #endif
469 #else
470 		if (srng_params.ring_id ==
471 			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
472 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
473 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
474 			htt_ring_type = HTT_SW_TO_HW_RING;
475 #endif
476 		} else if (srng_params.ring_id ==
477 #ifdef IPA_OFFLOAD
478 			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
479 #else
480 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
481 #endif
482 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
483 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
484 			htt_ring_type = HTT_SW_TO_HW_RING;
485 		} else {
486 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
487 				   "%s: Ring %d currently not supported\n",
488 				   __func__, srng_params.ring_id);
489 			goto fail1;
490 		}
491 
492 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
493 			 "%s: ring_type %d ring_id %d\n",
494 			 __func__, hal_ring_type, srng_params.ring_id);
495 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
496 			 "%s: hp_addr 0x%llx tp_addr 0x%llx\n",
497 			 __func__, (uint64_t)hp_addr, (uint64_t)tp_addr);
498 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
499 			 "%s: htt_ring_id %d\n", __func__, htt_ring_id);
500 		break;
501 	case RXDMA_MONITOR_BUF:
502 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
503 		htt_ring_type = HTT_SW_TO_HW_RING;
504 		break;
505 	case RXDMA_MONITOR_STATUS:
506 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
507 		htt_ring_type = HTT_SW_TO_HW_RING;
508 		break;
509 	case RXDMA_MONITOR_DST:
510 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
511 		htt_ring_type = HTT_HW_TO_SW_RING;
512 		break;
513 	case RXDMA_MONITOR_DESC:
514 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
515 		htt_ring_type = HTT_SW_TO_HW_RING;
516 		break;
517 	case RXDMA_DST:
518 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
519 		htt_ring_type = HTT_HW_TO_SW_RING;
520 		break;
521 
522 	default:
523 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
524 			"%s: Ring currently not supported\n", __func__);
525 			goto fail1;
526 	}
527 
528 	/*
529 	 * Set the length of the message.
530 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
531 	 * separately during the below call to qdf_nbuf_push_head.
532 	 * The contribution from the HTC header is added separately inside HTC.
533 	 */
534 	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
535 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
536 			"%s: Failed to expand head for SRING_SETUP msg\n",
537 			__func__);
538 		return QDF_STATUS_E_FAILURE;
539 	}
540 
541 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
542 
543 	/* rewind beyond alignment pad to get to the HTC header reserved area */
544 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
545 
546 	/* word 0 */
547 	*msg_word = 0;
548 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
549 
550 	if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
551 			(htt_ring_type == HTT_HW_TO_SW_RING))
552 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word,
553 			 DP_SW2HW_MACID(mac_id));
554 	else
555 		HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
556 
557 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
558 			 "%s: mac_id %d\n", __func__, mac_id);
559 	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
560 	/* TODO: Discuss with FW on changing this to unique ID and using
561 	 * htt_ring_type to send the type of ring
562 	 */
563 	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
564 
565 	/* word 1 */
566 	msg_word++;
567 	*msg_word = 0;
568 	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
569 		srng_params.ring_base_paddr & 0xffffffff);
570 
571 	/* word 2 */
572 	msg_word++;
573 	*msg_word = 0;
574 	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
575 		(uint64_t)srng_params.ring_base_paddr >> 32);
576 
577 	/* word 3 */
578 	msg_word++;
579 	*msg_word = 0;
580 	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
581 	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
582 		(ring_entry_size * srng_params.num_entries));
583 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
584 			 "%s: entry_size %d\n", __func__,
585 			 ring_entry_size);
586 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
587 			 "%s: num_entries %d\n", __func__,
588 			 srng_params.num_entries);
589 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
590 			 "%s: ring_size %d\n", __func__,
591 			 (ring_entry_size * srng_params.num_entries));
592 	if (htt_ring_type == HTT_SW_TO_HW_RING)
593 		HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
594 						*msg_word, 1);
595 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
596 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
597 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
598 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
599 	HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
600 		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
601 
602 	/* word 4 */
603 	msg_word++;
604 	*msg_word = 0;
605 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
606 		hp_addr & 0xffffffff);
607 
608 	/* word 5 */
609 	msg_word++;
610 	*msg_word = 0;
611 	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
612 		(uint64_t)hp_addr >> 32);
613 
614 	/* word 6 */
615 	msg_word++;
616 	*msg_word = 0;
617 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
618 		tp_addr & 0xffffffff);
619 
620 	/* word 7 */
621 	msg_word++;
622 	*msg_word = 0;
623 	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
624 		(uint64_t)tp_addr >> 32);
625 
626 	/* word 8 */
627 	msg_word++;
628 	*msg_word = 0;
629 	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
630 		srng_params.msi_addr & 0xffffffff);
631 
632 	/* word 9 */
633 	msg_word++;
634 	*msg_word = 0;
635 	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
636 		(uint64_t)(srng_params.msi_addr) >> 32);
637 
638 	/* word 10 */
639 	msg_word++;
640 	*msg_word = 0;
641 	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
642 		srng_params.msi_data);
643 
644 	/* word 11 */
645 	msg_word++;
646 	*msg_word = 0;
647 	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
648 		srng_params.intr_batch_cntr_thres_entries *
649 		ring_entry_size);
650 	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
651 		srng_params.intr_timer_thres_us >> 3);
652 
653 	/* word 12 */
654 	msg_word++;
655 	*msg_word = 0;
656 	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
657 		/* TODO: Setting low threshold to 1/8th of ring size - see
658 		 * if this needs to be configurable
659 		 */
660 		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
661 			srng_params.low_threshold);
662 	}
663 	/* "response_required" field should be set if a HTT response message is
664 	 * required after setting up the ring.
665 	 */
666 	pkt = htt_htc_pkt_alloc(soc);
667 	if (!pkt)
668 		goto fail1;
669 
670 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
671 
672 	SET_HTC_PACKET_INFO_TX(
673 		&pkt->htc_pkt,
674 		dp_htt_h2t_send_complete_free_netbuf,
675 		qdf_nbuf_data(htt_msg),
676 		qdf_nbuf_len(htt_msg),
677 		soc->htc_endpoint,
678 		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
679 
680 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
681 	DP_HTT_SEND_HTC_PKT(soc, pkt);
682 
683 	return QDF_STATUS_SUCCESS;
684 
685 fail1:
686 	qdf_nbuf_free(htt_msg);
687 fail0:
688 	return QDF_STATUS_E_FAILURE;
689 }
690 
691 /*
692  * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
693  * config message to target
694  * @htt_soc:	HTT SOC handle
695  * @pdev_id:	PDEV Id
696  * @hal_srng:	Opaque HAL SRNG pointer
697  * @hal_ring_type:	SRNG ring type
698  * @ring_buf_size:	SRNG buffer size
699  * @htt_tlv_filter:	Rx SRNG TLV and filter setting
700  * Return: 0 on success; error code on failure
701  */
702 int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng,
703 	int hal_ring_type, int ring_buf_size,
704 	struct htt_rx_ring_tlv_filter *htt_tlv_filter)
705 {
706 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
707 	struct dp_htt_htc_pkt *pkt;
708 	qdf_nbuf_t htt_msg;
709 	uint32_t *msg_word;
710 	struct hal_srng_params srng_params;
711 	uint32_t htt_ring_type, htt_ring_id;
712 	uint32_t tlv_filter;
713 
714 	htt_msg = qdf_nbuf_alloc(soc->osdev,
715 		HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
716 	/* reserve room for the HTC header */
717 	HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
718 	if (!htt_msg)
719 		goto fail0;
720 
721 	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
722 
723 	switch (hal_ring_type) {
724 	case RXDMA_BUF:
725 #if QCA_HOST2FW_RXBUF_RING
726 		htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
727 		htt_ring_type = HTT_SW_TO_SW_RING;
728 #else
729 		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
730 		htt_ring_type = HTT_SW_TO_HW_RING;
731 #endif
732 		break;
733 	case RXDMA_MONITOR_BUF:
734 		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
735 		htt_ring_type = HTT_SW_TO_HW_RING;
736 		break;
737 	case RXDMA_MONITOR_STATUS:
738 		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
739 		htt_ring_type = HTT_SW_TO_HW_RING;
740 		break;
741 	case RXDMA_MONITOR_DST:
742 		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
743 		htt_ring_type = HTT_HW_TO_SW_RING;
744 		break;
745 	case RXDMA_MONITOR_DESC:
746 		htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
747 		htt_ring_type = HTT_SW_TO_HW_RING;
748 		break;
749 	case RXDMA_DST:
750 		htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
751 		htt_ring_type = HTT_HW_TO_SW_RING;
752 		break;
753 
754 	default:
755 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
756 			"%s: Ring currently not supported\n", __func__);
757 		goto fail1;
758 	}
759 
760 	/*
761 	 * Set the length of the message.
762 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
763 	 * separately during the below call to qdf_nbuf_push_head.
764 	 * The contribution from the HTC header is added separately inside HTC.
765 	 */
766 	if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
767 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
768 			"%s: Failed to expand head for RX Ring Cfg msg\n",
769 			__func__);
770 		goto fail1; /* failure */
771 	}
772 
773 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
774 
775 	/* rewind beyond alignment pad to get to the HTC header reserved area */
776 	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
777 
778 	/* word 0 */
779 	*msg_word = 0;
780 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
781 
782 	/*
783 	 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
784 	 * SW_TO_SW and SW_TO_HW rings are unaffected by this
785 	 */
786 	if (htt_ring_type == HTT_SW_TO_SW_RING ||
787 			htt_ring_type == HTT_SW_TO_HW_RING)
788 		HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
789 						DP_SW2HW_MACID(pdev_id));
790 
791 	/* TODO: Discuss with FW on changing this to unique ID and using
792 	 * htt_ring_type to send the type of ring
793 	 */
794 	HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
795 
796 	HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
797 		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
798 
799 	HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
800 		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
801 
802 	/* word 1 */
803 	msg_word++;
804 	*msg_word = 0;
805 	HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
806 		ring_buf_size);
807 
808 	/* word 2 */
809 	msg_word++;
810 	*msg_word = 0;
811 
812 	if (htt_tlv_filter->enable_fp) {
813 		/* TYPE: MGMT */
814 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
815 			FP, MGMT, 0000,
816 			(htt_tlv_filter->fp_mgmt_filter &
817 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
818 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
819 			FP, MGMT, 0001,
820 			(htt_tlv_filter->fp_mgmt_filter &
821 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
822 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
823 			FP, MGMT, 0010,
824 			(htt_tlv_filter->fp_mgmt_filter &
825 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
826 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
827 			FP, MGMT, 0011,
828 			(htt_tlv_filter->fp_mgmt_filter &
829 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
830 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
831 			FP, MGMT, 0100,
832 			(htt_tlv_filter->fp_mgmt_filter &
833 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
834 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
835 			FP, MGMT, 0101,
836 			(htt_tlv_filter->fp_mgmt_filter &
837 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
838 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
839 			FP, MGMT, 0110,
840 			(htt_tlv_filter->fp_mgmt_filter &
841 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
842 		/* reserved */
843 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
844 			MGMT, 0111,
845 			(htt_tlv_filter->fp_mgmt_filter &
846 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
847 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
848 			FP, MGMT, 1000,
849 			(htt_tlv_filter->fp_mgmt_filter &
850 			FILTER_MGMT_BEACON) ? 1 : 0);
851 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
852 			FP, MGMT, 1001,
853 			(htt_tlv_filter->fp_mgmt_filter &
854 			FILTER_MGMT_ATIM) ? 1 : 0);
855 	}
856 
857 	if (htt_tlv_filter->enable_md) {
858 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
859 				MGMT, 0000, 1);
860 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
861 				MGMT, 0001, 1);
862 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
863 				MGMT, 0010, 1);
864 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
865 				MGMT, 0011, 1);
866 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
867 				MGMT, 0100, 1);
868 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
869 				MGMT, 0101, 1);
870 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
871 				MGMT, 0110, 1);
872 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
873 				MGMT, 0111, 1);
874 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
875 				MGMT, 1000, 1);
876 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
877 				MGMT, 1001, 1);
878 	}
879 
880 	if (htt_tlv_filter->enable_mo) {
881 		/* TYPE: MGMT */
882 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
883 			MO, MGMT, 0000,
884 			(htt_tlv_filter->mo_mgmt_filter &
885 			FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
886 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
887 			MO, MGMT, 0001,
888 			(htt_tlv_filter->mo_mgmt_filter &
889 			FILTER_MGMT_ASSOC_RES) ? 1 : 0);
890 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
891 			MO, MGMT, 0010,
892 			(htt_tlv_filter->mo_mgmt_filter &
893 			FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
894 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
895 			MO, MGMT, 0011,
896 			(htt_tlv_filter->mo_mgmt_filter &
897 			FILTER_MGMT_REASSOC_RES) ? 1 : 0);
898 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
899 			MO, MGMT, 0100,
900 			(htt_tlv_filter->mo_mgmt_filter &
901 			FILTER_MGMT_PROBE_REQ) ? 1 : 0);
902 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
903 			MO, MGMT, 0101,
904 			(htt_tlv_filter->mo_mgmt_filter &
905 			FILTER_MGMT_PROBE_RES) ? 1 : 0);
906 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
907 			MO, MGMT, 0110,
908 			(htt_tlv_filter->mo_mgmt_filter &
909 			FILTER_MGMT_TIM_ADVT) ? 1 : 0);
910 		/* reserved */
911 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
912 			MGMT, 0111,
913 			(htt_tlv_filter->mo_mgmt_filter &
914 			FILTER_MGMT_RESERVED_7) ? 1 : 0);
915 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
916 			MO, MGMT, 1000,
917 			(htt_tlv_filter->mo_mgmt_filter &
918 			FILTER_MGMT_BEACON) ? 1 : 0);
919 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
920 			MO, MGMT, 1001,
921 			(htt_tlv_filter->mo_mgmt_filter &
922 			FILTER_MGMT_ATIM) ? 1 : 0);
923 	}
924 
925 	/* word 3 */
926 	msg_word++;
927 	*msg_word = 0;
928 
929 	if (htt_tlv_filter->enable_fp) {
930 		/* TYPE: MGMT */
931 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
932 			FP, MGMT, 1010,
933 			(htt_tlv_filter->fp_mgmt_filter &
934 			FILTER_MGMT_DISASSOC) ? 1 : 0);
935 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
936 			FP, MGMT, 1011,
937 			(htt_tlv_filter->fp_mgmt_filter &
938 			FILTER_MGMT_AUTH) ? 1 : 0);
939 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
940 			FP, MGMT, 1100,
941 			(htt_tlv_filter->fp_mgmt_filter &
942 			FILTER_MGMT_DEAUTH) ? 1 : 0);
943 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
944 			FP, MGMT, 1101,
945 			(htt_tlv_filter->fp_mgmt_filter &
946 			FILTER_MGMT_ACTION) ? 1 : 0);
947 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
948 			FP, MGMT, 1110,
949 			(htt_tlv_filter->fp_mgmt_filter &
950 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
951 		/* reserved*/
952 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
953 			MGMT, 1111,
954 			(htt_tlv_filter->fp_mgmt_filter &
955 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
956 	}
957 
958 	if (htt_tlv_filter->enable_md) {
959 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
960 				MGMT, 1010, 1);
961 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
962 				MGMT, 1011, 1);
963 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
964 				MGMT, 1100, 1);
965 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
966 				MGMT, 1101, 1);
967 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
968 				MGMT, 1110, 1);
969 	}
970 
971 	if (htt_tlv_filter->enable_mo) {
972 		/* TYPE: MGMT */
973 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
974 			MO, MGMT, 1010,
975 			(htt_tlv_filter->mo_mgmt_filter &
976 			FILTER_MGMT_DISASSOC) ? 1 : 0);
977 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
978 			MO, MGMT, 1011,
979 			(htt_tlv_filter->mo_mgmt_filter &
980 			FILTER_MGMT_AUTH) ? 1 : 0);
981 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
982 			MO, MGMT, 1100,
983 			(htt_tlv_filter->mo_mgmt_filter &
984 			FILTER_MGMT_DEAUTH) ? 1 : 0);
985 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
986 			MO, MGMT, 1101,
987 			(htt_tlv_filter->mo_mgmt_filter &
988 			FILTER_MGMT_ACTION) ? 1 : 0);
989 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
990 			MO, MGMT, 1110,
991 			(htt_tlv_filter->mo_mgmt_filter &
992 			FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
993 		/* reserved*/
994 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
995 			MGMT, 1111,
996 			(htt_tlv_filter->mo_mgmt_filter &
997 			FILTER_MGMT_RESERVED_15) ? 1 : 0);
998 	}
999 
1000 	/* word 4 */
1001 	msg_word++;
1002 	*msg_word = 0;
1003 
1004 	if (htt_tlv_filter->enable_fp) {
1005 		/* TYPE: CTRL */
1006 		/* reserved */
1007 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1008 			CTRL, 0000,
1009 			(htt_tlv_filter->fp_ctrl_filter &
1010 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1011 		/* reserved */
1012 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1013 			CTRL, 0001,
1014 			(htt_tlv_filter->fp_ctrl_filter &
1015 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1016 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1017 			CTRL, 0010,
1018 			(htt_tlv_filter->fp_ctrl_filter &
1019 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1020 		/* reserved */
1021 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1022 			CTRL, 0011,
1023 			(htt_tlv_filter->fp_ctrl_filter &
1024 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1025 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1026 			CTRL, 0100,
1027 			(htt_tlv_filter->fp_ctrl_filter &
1028 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1029 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1030 			CTRL, 0101,
1031 			(htt_tlv_filter->fp_ctrl_filter &
1032 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1033 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1034 			CTRL, 0110,
1035 			(htt_tlv_filter->fp_ctrl_filter &
1036 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1037 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1038 			CTRL, 0111,
1039 			(htt_tlv_filter->fp_ctrl_filter &
1040 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1041 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1042 			CTRL, 1000,
1043 			(htt_tlv_filter->fp_ctrl_filter &
1044 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1045 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
1046 			CTRL, 1001,
1047 			(htt_tlv_filter->fp_ctrl_filter &
1048 			FILTER_CTRL_BA) ? 1 : 0);
1049 	}
1050 
1051 	if (htt_tlv_filter->enable_md) {
1052 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1053 				CTRL, 0000, 1);
1054 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1055 				CTRL, 0001, 1);
1056 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1057 				CTRL, 0010, 1);
1058 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1059 				CTRL, 0011, 1);
1060 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1061 				CTRL, 0100, 1);
1062 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1063 				CTRL, 0101, 1);
1064 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1065 				CTRL, 0110, 1);
1066 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1067 				CTRL, 0111, 1);
1068 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1069 				CTRL, 1000, 1);
1070 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1071 				CTRL, 1001, 1);
1072 	}
1073 
1074 	if (htt_tlv_filter->enable_mo) {
1075 		/* TYPE: CTRL */
1076 		/* reserved */
1077 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1078 			CTRL, 0000,
1079 			(htt_tlv_filter->mo_ctrl_filter &
1080 			FILTER_CTRL_RESERVED_1) ? 1 : 0);
1081 		/* reserved */
1082 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1083 			CTRL, 0001,
1084 			(htt_tlv_filter->mo_ctrl_filter &
1085 			FILTER_CTRL_RESERVED_2) ? 1 : 0);
1086 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1087 			CTRL, 0010,
1088 			(htt_tlv_filter->mo_ctrl_filter &
1089 			FILTER_CTRL_TRIGGER) ? 1 : 0);
1090 		/* reserved */
1091 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1092 			CTRL, 0011,
1093 			(htt_tlv_filter->mo_ctrl_filter &
1094 			FILTER_CTRL_RESERVED_4) ? 1 : 0);
1095 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1096 			CTRL, 0100,
1097 			(htt_tlv_filter->mo_ctrl_filter &
1098 			FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1099 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1100 			CTRL, 0101,
1101 			(htt_tlv_filter->mo_ctrl_filter &
1102 			FILTER_CTRL_VHT_NDP) ? 1 : 0);
1103 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1104 			CTRL, 0110,
1105 			(htt_tlv_filter->mo_ctrl_filter &
1106 			FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1107 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1108 			CTRL, 0111,
1109 			(htt_tlv_filter->mo_ctrl_filter &
1110 			FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1111 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1112 			CTRL, 1000,
1113 			(htt_tlv_filter->mo_ctrl_filter &
1114 			FILTER_CTRL_BA_REQ) ? 1 : 0);
1115 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
1116 			CTRL, 1001,
1117 			(htt_tlv_filter->mo_ctrl_filter &
1118 			FILTER_CTRL_BA) ? 1 : 0);
1119 	}
1120 
1121 	/* word 5 */
1122 	msg_word++;
1123 	*msg_word = 0;
1124 	if (htt_tlv_filter->enable_fp) {
1125 		/* TYPE: CTRL */
1126 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1127 			CTRL, 1010,
1128 			(htt_tlv_filter->fp_ctrl_filter &
1129 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1130 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1131 			CTRL, 1011,
1132 			(htt_tlv_filter->fp_ctrl_filter &
1133 			FILTER_CTRL_RTS) ? 1 : 0);
1134 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1135 			CTRL, 1100,
1136 			(htt_tlv_filter->fp_ctrl_filter &
1137 			FILTER_CTRL_CTS) ? 1 : 0);
1138 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1139 			CTRL, 1101,
1140 			(htt_tlv_filter->fp_ctrl_filter &
1141 			FILTER_CTRL_ACK) ? 1 : 0);
1142 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1143 			CTRL, 1110,
1144 			(htt_tlv_filter->fp_ctrl_filter &
1145 			FILTER_CTRL_CFEND) ? 1 : 0);
1146 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1147 			CTRL, 1111,
1148 			(htt_tlv_filter->fp_ctrl_filter &
1149 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1150 		/* TYPE: DATA */
1151 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1152 			DATA, MCAST,
1153 			(htt_tlv_filter->fp_data_filter &
1154 			FILTER_DATA_MCAST) ? 1 : 0);
1155 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1156 			DATA, UCAST,
1157 			(htt_tlv_filter->fp_data_filter &
1158 			FILTER_DATA_UCAST) ? 1 : 0);
1159 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
1160 			DATA, NULL,
1161 			(htt_tlv_filter->fp_data_filter &
1162 			FILTER_DATA_NULL) ? 1 : 0);
1163 	}
1164 
1165 	if (htt_tlv_filter->enable_md) {
1166 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1167 				CTRL, 1010, 1);
1168 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1169 				CTRL, 1011, 1);
1170 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1171 				CTRL, 1100, 1);
1172 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1173 				CTRL, 1101, 1);
1174 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1175 				CTRL, 1110, 1);
1176 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1177 				CTRL, 1111, 1);
1178 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1179 				DATA, MCAST, 1);
1180 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1181 				DATA, UCAST, 1);
1182 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
1183 				DATA, NULL, 1);
1184 	}
1185 
1186 	if (htt_tlv_filter->enable_mo) {
1187 		/* TYPE: CTRL */
1188 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1189 			CTRL, 1010,
1190 			(htt_tlv_filter->mo_ctrl_filter &
1191 			FILTER_CTRL_PSPOLL) ? 1 : 0);
1192 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1193 			CTRL, 1011,
1194 			(htt_tlv_filter->mo_ctrl_filter &
1195 			FILTER_CTRL_RTS) ? 1 : 0);
1196 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1197 			CTRL, 1100,
1198 			(htt_tlv_filter->mo_ctrl_filter &
1199 			FILTER_CTRL_CTS) ? 1 : 0);
1200 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1201 			CTRL, 1101,
1202 			(htt_tlv_filter->mo_ctrl_filter &
1203 			FILTER_CTRL_ACK) ? 1 : 0);
1204 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1205 			CTRL, 1110,
1206 			(htt_tlv_filter->mo_ctrl_filter &
1207 			FILTER_CTRL_CFEND) ? 1 : 0);
1208 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1209 			CTRL, 1111,
1210 			(htt_tlv_filter->mo_ctrl_filter &
1211 			FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1212 		/* TYPE: DATA */
1213 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1214 			DATA, MCAST,
1215 			(htt_tlv_filter->mo_data_filter &
1216 			FILTER_DATA_MCAST) ? 1 : 0);
1217 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1218 			DATA, UCAST,
1219 			(htt_tlv_filter->mo_data_filter &
1220 			FILTER_DATA_UCAST) ? 1 : 0);
1221 		htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
1222 			DATA, NULL,
1223 			(htt_tlv_filter->mo_data_filter &
1224 			FILTER_DATA_NULL) ? 1 : 0);
1225 	}
1226 
1227 	/* word 6 */
1228 	msg_word++;
1229 	*msg_word = 0;
1230 	tlv_filter = 0;
1231 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1232 		htt_tlv_filter->mpdu_start);
1233 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1234 		htt_tlv_filter->msdu_start);
1235 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1236 		htt_tlv_filter->packet);
1237 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1238 		htt_tlv_filter->msdu_end);
1239 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1240 		htt_tlv_filter->mpdu_end);
1241 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1242 		htt_tlv_filter->packet_header);
1243 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
1244 		htt_tlv_filter->attention);
1245 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1246 		htt_tlv_filter->ppdu_start);
1247 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1248 		htt_tlv_filter->ppdu_end);
1249 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1250 		htt_tlv_filter->ppdu_end_user_stats);
1251 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1252 		PPDU_END_USER_STATS_EXT,
1253 		htt_tlv_filter->ppdu_end_user_stats_ext);
1254 	htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1255 		htt_tlv_filter->ppdu_end_status_done);
1256 	/* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1257 	 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1258 		 htt_tlv_filter->header_per_msdu);
1259 
1260 	HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1261 
1262 	/* "response_required" field should be set if a HTT response message is
1263 	 * required after setting up the ring.
1264 	 */
1265 	pkt = htt_htc_pkt_alloc(soc);
1266 	if (!pkt)
1267 		goto fail1;
1268 
1269 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
1270 
1271 	SET_HTC_PACKET_INFO_TX(
1272 		&pkt->htc_pkt,
1273 		dp_htt_h2t_send_complete_free_netbuf,
1274 		qdf_nbuf_data(htt_msg),
1275 		qdf_nbuf_len(htt_msg),
1276 		soc->htc_endpoint,
1277 		1); /* tag - not relevant here */
1278 
1279 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
1280 	DP_HTT_SEND_HTC_PKT(soc, pkt);
1281 	return QDF_STATUS_SUCCESS;
1282 
1283 fail1:
1284 	qdf_nbuf_free(htt_msg);
1285 fail0:
1286 	return QDF_STATUS_E_FAILURE;
1287 }
1288 
1289 #if defined(CONFIG_WIN) && WDI_EVENT_ENABLE
1290 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1291 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1292 
1293 {
1294 	uint32_t pdev_id;
1295 	uint32_t *msg_word = NULL;
1296 	uint32_t msg_remain_len = 0;
1297 
1298 	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1299 
1300 	/*COOKIE MSB*/
1301 	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1302 
1303 	/* stats message length + 16 size of HTT header*/
1304 	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1305 				(uint32_t)DP_EXT_MSG_LENGTH);
1306 
1307 	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1308 			msg_word,  msg_remain_len,
1309 			WDI_NO_VAL, pdev_id);
1310 
1311 	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1312 		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1313 	}
1314 	/* Need to be freed here as WDI handler will
1315 	 * make a copy of pkt to send data to application
1316 	 */
1317 	qdf_nbuf_free(htt_msg);
1318 	return QDF_STATUS_SUCCESS;
1319 }
1320 #else
1321 static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1322 					struct dp_soc *soc, qdf_nbuf_t htt_msg)
1323 {
1324 	return QDF_STATUS_E_NOSUPPORT;
1325 }
1326 #endif
1327 
1328 /**
1329  * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1330  * @htt_stats: htt stats info
1331  *
1332  * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1333  * contains sub messages which are identified by a TLV header.
1334  * In this function we will process the stream of T2H messages and read all the
1335  * TLV contained in the message.
1336  *
 * The following cases have been taken care of
1338  * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1339  *		In this case the buffer will contain multiple tlvs.
1340  * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1341  *		Only one tlv will be contained in the HTT message and this tag
1342  *		will extend onto the next buffer.
1343  * Case 3: When the buffer is the continuation of the previous message
1344  * Case 4: tlv length is 0. which will indicate the end of message
1345  *
1346  * return: void
1347  */
static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
					struct dp_soc *soc)
{
	htt_tlv_tag_t tlv_type = 0xff;
	qdf_nbuf_t htt_msg = NULL;
	uint32_t *msg_word;
	/* Reassembly buffer for a TLV that spans multiple T2H buffers;
	 * non-NULL head means a partial TLV is pending (Case 2/3).
	 */
	uint8_t *tlv_buf_head = NULL;
	uint8_t *tlv_buf_tail = NULL;
	uint32_t msg_remain_len = 0;
	uint32_t tlv_remain_len = 0;
	uint32_t *tlv_start;
	int cookie_val;
	int cookie_msb;
	int pdev_id;
	bool copy_stats = false;
	struct dp_pdev *pdev;

	/* Process node in the HTT message queue */
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
		!= NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		/* Non-zero cookie LSB: deliver this segment through the WDI
		 * path instead of parsing it here; dp_send_htt_stat_resp
		 * frees the buffer on success.
		 */
		cookie_val = *(msg_word + 1);
		if (cookie_val) {
			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
					== QDF_STATUS_SUCCESS) {
				continue;
			}
		}
		/* Cookie MSB: low bits carry the pdev id, upper bits select
		 * copy-to-pdev-cache vs print-to-console handling.
		 */
		cookie_msb = *(msg_word + 2);
		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
		pdev = soc->pdev_list[pdev_id];

		/* NOTE(review): copy_stats is never reset inside the loop, so
		 * once one message requests copy mode every later message in
		 * this queue is also copied — confirm this is intended.
		 */
		if (cookie_msb >> 2) {
			copy_stats = true;
		}
		/* read 5th word */
		msg_word = msg_word + 4;
		msg_remain_len = qdf_min(htt_stats->msg_len,
				(uint32_t) DP_EXT_MSG_LENGTH);
		/* Keep processing the node till node length is 0 */
		while (msg_remain_len) {
			/*
			 * if message is not a continuation of previous message
			 * read the tlv type and tlv length
			 */
			if (!tlv_buf_head) {
				tlv_type = HTT_STATS_TLV_TAG_GET(
						*msg_word);
				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
						*msg_word);
			}

			/* Case 4: zero-length TLV marks end of message;
			 * any half-assembled TLV is abandoned.
			 */
			if (tlv_remain_len == 0) {
				msg_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

				goto error;
			}

			/* TLV length field excludes its header; account for
			 * the header once, when the TLV is first seen.
			 */
			if (!tlv_buf_head)
				tlv_remain_len += HTT_TLV_HDR_LEN;

			if ((tlv_remain_len <= msg_remain_len)) {
				/* Case 3 */
				if (tlv_buf_head) {
					qdf_mem_copy(tlv_buf_tail,
							(uint8_t *)msg_word,
							tlv_remain_len);
					tlv_start = (uint32_t *)tlv_buf_head;
				} else {
					/* Case 1 */
					tlv_start = msg_word;
				}

				if (copy_stats)
					dp_htt_stats_copy_tag(pdev, tlv_type, tlv_start);
				else
					dp_htt_stats_print_tag(tlv_type, tlv_start);

				msg_remain_len -= tlv_remain_len;

				msg_word = (uint32_t *)
					(((uint8_t *)msg_word) +
					tlv_remain_len);

				tlv_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

			} else { /* tlv_remain_len > msg_remain_len */
				/* Case 2 & 3 */
				if (!tlv_buf_head) {
					tlv_buf_head = qdf_mem_malloc(
							tlv_remain_len);

					if (!tlv_buf_head) {
						QDF_TRACE(QDF_MODULE_ID_TXRX,
								QDF_TRACE_LEVEL_ERROR,
								"Alloc failed");
						goto error;
					}

					tlv_buf_tail = tlv_buf_head;
				}

				/* Stash what this buffer holds; the rest of
				 * the TLV arrives in the next dequeued buffer.
				 */
				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
						msg_remain_len);
				tlv_remain_len -= msg_remain_len;
				tlv_buf_tail += msg_remain_len;
			}
		}

		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
		}

		qdf_nbuf_free(htt_msg);
	}
	return;

error:
	/* Drop the current buffer and drain the rest of the queue */
	qdf_nbuf_free(htt_msg);
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
			!= NULL)
		qdf_nbuf_free(htt_msg);
}
1483 
1484 void htt_t2h_stats_handler(void *context)
1485 {
1486 	struct dp_soc *soc = (struct dp_soc *)context;
1487 	struct htt_stats_context htt_stats;
1488 	uint32_t length;
1489 	uint32_t *msg_word;
1490 	qdf_nbuf_t htt_msg = NULL;
1491 	uint8_t done;
1492 	uint8_t rem_stats;
1493 
1494 	if (!soc || !qdf_atomic_read(&soc->cmn_init_done)) {
1495 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1496 			"soc: 0x%pK, init_done: %d", soc,
1497 			qdf_atomic_read(&soc->cmn_init_done));
1498 		return;
1499 	}
1500 
1501 	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
1502 	qdf_nbuf_queue_init(&htt_stats.msg);
1503 
1504 	/* pull one completed stats from soc->htt_stats_msg and process */
1505 	qdf_spin_lock_bh(&soc->htt_stats.lock);
1506 	if (!soc->htt_stats.num_stats) {
1507 		qdf_spin_unlock_bh(&soc->htt_stats.lock);
1508 		return;
1509 	}
1510 	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
1511 		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1512 		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
1513 		length = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(*msg_word);
1514 		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
1515 		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
1516 		/*
1517 		 * HTT EXT stats response comes as stream of TLVs which span over
1518 		 * multiple T2H messages.
1519 		 * The first message will carry length of the response.
1520 		 * For rest of the messages length will be zero.
1521 		 */
1522 		if (length)
1523 			htt_stats.msg_len = length;
1524 		/*
1525 		 * Done bit signifies that this is the last T2H buffer in the
1526 		 * stream of HTT EXT STATS message
1527 		 */
1528 		if (done)
1529 			break;
1530 	}
1531 	rem_stats = --soc->htt_stats.num_stats;
1532 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
1533 
1534 	dp_process_htt_stat_msg(&htt_stats, soc);
1535 	/* If there are more stats to process, schedule stats work again */
1536 	if (rem_stats)
1537 		qdf_sched_work(0, &soc->htt_stats.work);
1538 }
1539 
/*
 * dp_get_ppdu_info_user_index: Find place holder for the received
 * ppdu stats info
 * @pdev: DP pdev handle
 * @peer_id: sw peer id of the user whose stats are being reported
 *
 * return: user index to be populated
 */
1547 #ifdef FEATURE_PERPKT_INFO
1548 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
1549 						uint16_t peer_id)
1550 {
1551 	uint8_t user_index = 0;
1552 	struct cdp_tx_completion_ppdu *ppdu_desc;
1553 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1554 
1555 	ppdu_desc =
1556 	(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(pdev->tx_ppdu_info.buf);
1557 
1558 	while ((user_index + 1) <= pdev->tx_ppdu_info.last_user) {
1559 		ppdu_user_desc = &ppdu_desc->user[user_index];
1560 		if (ppdu_user_desc->peer_id != peer_id) {
1561 			user_index++;
1562 			continue;
1563 		} else {
1564 			/* Max users possible is 8 so user array index should
1565 			 * not exceed 7
1566 			 */
1567 			qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX);
1568 			return user_index;
1569 		}
1570 	}
1571 
1572 	pdev->tx_ppdu_info.last_user++;
1573 	/* Max users possible is 8 so last user should not exceed 8 */
1574 	qdf_assert_always(pdev->tx_ppdu_info.last_user <= CDP_MU_MAX_USERS);
1575 	return pdev->tx_ppdu_info.last_user - 1;
1576 }
1577 
/*
 * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
 * @pdev: DP pdev handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
 *
 * return: void
 */
static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
		uint32_t *tag_buf)
{
	uint16_t frame_type;
	uint16_t freq;
	struct dp_soc *soc = NULL;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	htt_ppdu_stats_common_tlv *dp_stats_buf =
		(htt_ppdu_stats_common_tlv *)tag_buf;

	/* Aggregated per-PPDU descriptor lives in the pdev's staging nbuf */
	ppdu_desc =
	(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(pdev->tx_ppdu_info.buf);

	ppdu_desc->ppdu_id = dp_stats_buf->ppdu_id;
	/* Word offsets below follow htt_ppdu_stats_common_tlv layout —
	 * keep in sync with htt_ppdu_stats.h.
	 */
	tag_buf += 2;
	ppdu_desc->num_users =
		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
	tag_buf++;
	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);

	/* Collapse FW frame types into the two CDP categories */
	if ((frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) ||
			(frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU))
		ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
	else
		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;

	tag_buf += 2;
	ppdu_desc->tx_duration = *tag_buf;
	tag_buf += 3;
	ppdu_desc->ppdu_start_timestamp = *tag_buf;

	/* End timestamp is derived, not reported by FW */
	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
					ppdu_desc->tx_duration;

	tag_buf++;

	/* Track channel changes; cache the mapped channel number on pdev */
	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
	if (freq != ppdu_desc->channel) {
		soc = pdev->soc;
		ppdu_desc->channel = freq;
		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
			pdev->operating_channel =
		soc->cdp_soc.ol_ops->freq_to_channel(pdev->osif_pdev, freq);
	}

	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
}
1632 
/*
 * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
 * @pdev: DP pdev handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
 *
 * return: void
 */
1639 static void dp_process_ppdu_stats_user_common_tlv(
1640 		struct dp_pdev *pdev, uint32_t *tag_buf)
1641 {
1642 	uint16_t peer_id;
1643 	struct dp_peer *peer;
1644 	struct cdp_tx_completion_ppdu *ppdu_desc;
1645 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1646 	uint8_t curr_user_index = 0;
1647 
1648 	ppdu_desc =
1649 	(struct cdp_tx_completion_ppdu *) qdf_nbuf_data(pdev->tx_ppdu_info.buf);
1650 
1651 	tag_buf++;
1652 	peer_id = HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
1653 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1654 
1655 	if (!peer)
1656 		return;
1657 
1658 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id);
1659 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1660 
1661 	ppdu_user_desc->peer_id = peer_id;
1662 
1663 	tag_buf++;
1664 
1665 	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
1666 		ppdu_user_desc->is_mcast = true;
1667 		ppdu_user_desc->mpdu_tried_mcast =
1668 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
1669 	} else {
1670 		ppdu_user_desc->mpdu_tried_ucast =
1671 		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
1672 	}
1673 
1674 	tag_buf++;
1675 
1676 	ppdu_user_desc->qos_ctrl =
1677 		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
1678 	ppdu_user_desc->frame_ctrl =
1679 		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
1680 }
1681 
1682 
1683 /**
1684  * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
1685  * @pdev: DP pdev handle
1686  * @tag_buf: T2H message buffer carrying the user rate TLV
1687  *
1688  * return:void
1689  */
static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
		uint32_t *tag_buf)
{
	uint16_t peer_id;
	struct dp_peer *peer;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;

	ppdu_desc =
	(struct cdp_tx_completion_ppdu *) qdf_nbuf_data(pdev->tx_ppdu_info.buf);

	/* Word 1: sw peer id; drop the TLV if the peer is unknown */
	tag_buf++;
	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
	peer = dp_peer_find_by_id(pdev->soc, peer_id);

	if (!peer)
		return;

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id);

	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->peer_id = peer_id;

	/* TID shares word 1 with the peer id */
	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);

	qdf_mem_copy(ppdu_user_desc->mac_addr, peer->mac_addr.raw,
			DP_MAC_ADDR_LEN);

	/* Word 3: OFDMA RU allocation; tone count spans start..end
	 * RU indices inclusive, hence the +1.
	 */
	tag_buf += 2;

	ppdu_user_desc->ru_tones = (HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
			HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;

	/* Word 5: PPDU type (SU/MU variants) */
	tag_buf += 2;

	ppdu_user_desc->ppdu_type =
		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);

	/* Word 6 packs all rate fields; keep raw word then unpack */
	tag_buf++;
	ppdu_user_desc->tx_rate = *tag_buf;

	ppdu_user_desc->ltf_size =
		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
	ppdu_user_desc->stbc =
		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
	ppdu_user_desc->he_re =
		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
	ppdu_user_desc->txbf =
		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
	ppdu_user_desc->bw =
		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf);
	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
	ppdu_user_desc->preamble =
		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
}
1751 
/*
 * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
 * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
 *
 * return: void
 */
1760 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
1761 		struct dp_pdev *pdev, uint32_t *tag_buf)
1762 {
1763 	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
1764 		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
1765 
1766 	struct cdp_tx_completion_ppdu *ppdu_desc;
1767 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1768 	uint8_t curr_user_index = 0;
1769 	uint16_t peer_id;
1770 	struct dp_peer *peer;
1771 
1772 	ppdu_desc =
1773 	(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(pdev->tx_ppdu_info.buf);
1774 
1775 	tag_buf++;
1776 
1777 	peer_id =
1778 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
1779 
1780 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1781 
1782 	if (!peer)
1783 		return;
1784 
1785 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id);
1786 
1787 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1788 	ppdu_user_desc->peer_id = peer_id;
1789 
1790 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
1791 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
1792 					CDP_BA_64_BIT_MAP_SIZE_DWORDS);
1793 }
1794 
/*
 * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
 * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
 *
 * return: void
 */
1803 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
1804 		struct dp_pdev *pdev, uint32_t *tag_buf)
1805 {
1806 	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
1807 		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
1808 
1809 	struct cdp_tx_completion_ppdu *ppdu_desc;
1810 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1811 	uint8_t curr_user_index = 0;
1812 	uint16_t peer_id;
1813 	struct dp_peer *peer;
1814 
1815 	ppdu_desc =
1816 	(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(pdev->tx_ppdu_info.buf);
1817 
1818 	tag_buf++;
1819 
1820 	peer_id =
1821 	HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
1822 
1823 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1824 
1825 	if (!peer)
1826 		return;
1827 
1828 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id);
1829 
1830 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1831 	ppdu_user_desc->peer_id = peer_id;
1832 
1833 	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
1834 	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
1835 					CDP_BA_256_BIT_MAP_SIZE_DWORDS);
1836 }
1837 
/*
 * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
 * htt_ppdu_stats_user_cmpltn_common_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
 *
 * return: void
 */
1845  */
static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf)
{
	uint16_t peer_id;
	struct dp_peer *peer;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;

	ppdu_desc =
	(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(pdev->tx_ppdu_info.buf);

	/* Word 1: sw peer id; drop the TLV if the peer is unknown */
	tag_buf++;
	peer_id =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
	peer = dp_peer_find_by_id(pdev->soc, peer_id);

	if (!peer)
		return;

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->peer_id = peer_id;

	/* Completion status and TID share word 1 with the peer id */
	ppdu_user_desc->completion_status =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
				*tag_buf);

	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);


	/* Word 2: ack RSSI is a per-PPDU (not per-user) field */
	tag_buf++;
	ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;

	tag_buf++;

	/* Word 3: count of MPDUs acked in this PPDU */
	ppdu_user_desc->mpdu_success =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);

	tag_buf++;

	/* Word 4: retry counters; retry_msdus is their derived sum */
	ppdu_user_desc->long_retries =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);

	ppdu_user_desc->short_retries =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
	ppdu_user_desc->retry_msdus =
		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;

	ppdu_user_desc->is_ampdu =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);

}
1902 
1903 /*
1904  * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
1905  * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
1906  * pdev: DP PDEV handle
1907  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
1908  *
1909  * return:void
1910  */
1911 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
1912 		struct dp_pdev *pdev, uint32_t *tag_buf)
1913 {
1914 	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
1915 		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
1916 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1917 	struct cdp_tx_completion_ppdu *ppdu_desc;
1918 	uint8_t curr_user_index = 0;
1919 	uint16_t peer_id;
1920 	struct dp_peer *peer;
1921 
1922 	ppdu_desc =
1923 	(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(pdev->tx_ppdu_info.buf);
1924 
1925 	tag_buf++;
1926 
1927 	peer_id =
1928 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
1929 
1930 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1931 
1932 	if (!peer)
1933 		return;
1934 
1935 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id);
1936 
1937 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1938 	ppdu_user_desc->peer_id = peer_id;
1939 
1940 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
1941 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
1942 			CDP_BA_64_BIT_MAP_SIZE_DWORDS);
1943 }
1944 
1945 /*
1946  * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
1947  * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
1948  * pdev: DP PDEV handle
1949  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
1950  *
1951  * return:void
1952  */
1953 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
1954 		struct dp_pdev *pdev, uint32_t *tag_buf)
1955 {
1956 	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
1957 		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
1958 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
1959 	struct cdp_tx_completion_ppdu *ppdu_desc;
1960 	uint8_t curr_user_index = 0;
1961 	uint16_t peer_id;
1962 	struct dp_peer *peer;
1963 
1964 	ppdu_desc =
1965 	(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(pdev->tx_ppdu_info.buf);
1966 
1967 	tag_buf++;
1968 
1969 	peer_id =
1970 	HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
1971 
1972 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
1973 
1974 	if (!peer)
1975 		return;
1976 
1977 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id);
1978 
1979 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
1980 	ppdu_user_desc->peer_id = peer_id;
1981 
1982 	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
1983 	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
1984 			CDP_BA_256_BIT_MAP_SIZE_DWORDS);
1985 }
1986 
1987 /*
1988  * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
1989  * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @pdev: DP PDEV handle
1991  * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
1992  *
1993  * return:void
1994  */
1995 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
1996 		struct dp_pdev *pdev, uint32_t *tag_buf)
1997 {
1998 	uint16_t peer_id;
1999 	struct dp_peer *peer;
2000 	struct cdp_tx_completion_ppdu *ppdu_desc;
2001 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2002 	uint8_t curr_user_index = 0;
2003 
2004 	ppdu_desc =
2005 	(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(pdev->tx_ppdu_info.buf);
2006 
2007 	tag_buf += 2;
2008 	peer_id =
2009 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
2010 
2011 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2012 
2013 	if (!peer)
2014 		return;
2015 
2016 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id);
2017 
2018 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2019 	ppdu_user_desc->peer_id = peer_id;
2020 
2021 	tag_buf++;
2022 	ppdu_user_desc->num_mpdu =
2023 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
2024 
2025 	ppdu_user_desc->num_msdu =
2026 	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
2027 
2028 	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
2029 
2030 	tag_buf += 2;
2031 	ppdu_user_desc->success_bytes = *tag_buf;
2032 
2033 }
2034 
2035 /*
2036  * dp_process_ppdu_stats_user_common_array_tlv: Process
2037  * htt_ppdu_stats_user_common_array_tlv
2038  * pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_usr_common_array_tlv_v
2040  *
2041  * return:void
2042  */
2043 static void dp_process_ppdu_stats_user_common_array_tlv(struct dp_pdev *pdev,
2044 						uint32_t *tag_buf)
2045 {
2046 	uint32_t peer_id;
2047 	struct dp_peer *peer;
2048 	struct cdp_tx_completion_ppdu *ppdu_desc;
2049 	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2050 	uint8_t curr_user_index = 0;
2051 	struct htt_tx_ppdu_stats_info *dp_stats_buf;
2052 
2053 	ppdu_desc =
2054 	(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(pdev->tx_ppdu_info.buf);
2055 
2056 	tag_buf++;
2057 	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
2058 	tag_buf += 3;
2059 	peer_id =
2060 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2061 
2062 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2063 
2064 	if (!peer) {
2065 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2066 			"Invalid peer");
2067 		return;
2068 	}
2069 
2070 	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id);
2071 
2072 	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2073 
2074 	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2075 	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2076 
2077 	tag_buf++;
2078 
2079 	ppdu_user_desc->success_msdus =
2080 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2081 	ppdu_user_desc->retry_bytes =
2082 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2083 	tag_buf++;
2084 	ppdu_user_desc->failed_msdus =
2085 		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
2086 }
2087 
2088 /*
2089  * dp_process_ppdu_stats_flush_tlv: Process
2090  * htt_ppdu_stats_flush_tlv
2091  * @pdev: DP PDEV handle
2092  * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
2093  *
2094  * return:void
2095  */
2096 static void dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
2097 						uint32_t *tag_buf)
2098 {
2099 	uint32_t peer_id;
2100 	uint32_t drop_reason;
2101 	uint8_t tid;
2102 	uint32_t num_msdu;
2103 	struct dp_peer *peer;
2104 
2105 	tag_buf++;
2106 	drop_reason = *tag_buf;
2107 
2108 	tag_buf++;
2109 	num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
2110 
2111 	tag_buf++;
2112 	peer_id =
2113 		HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
2114 
2115 	peer = dp_peer_find_by_id(pdev->soc, peer_id);
2116 	if (!peer)
2117 		return;
2118 
2119 	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
2120 
2121 	if (drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
2122 		DP_STATS_INC(peer, tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
2123 					num_msdu);
2124 	}
2125 }
2126 
2127 /*
2128  * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
2129  * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2130  * @pdev: DP PDEV handle
2131  * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2132  * @length: tlv_length
2133  *
2134  * return:void
2135  */
2136 static void dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(
2137 	struct dp_pdev *pdev, uint32_t *tag_buf, uint32_t length)
2138 {
2139 	htt_ppdu_stats_tx_mgmtctrl_payload_tlv *dp_stats_buf;
2140 	qdf_nbuf_t nbuf;
2141 	uint32_t payload_size;
2142 
2143 	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode))
2144 		return;
2145 
2146 	payload_size = length - HTT_MGMT_CTRL_TLV_RESERVERD_LEN;
2147 	nbuf = NULL;
2148 	dp_stats_buf = (htt_ppdu_stats_tx_mgmtctrl_payload_tlv *)tag_buf;
2149 
2150 
2151 	nbuf = qdf_nbuf_alloc(pdev->soc->osdev, payload_size, 0, 4, true);
2152 
2153 	if (!nbuf) {
2154 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2155 				"Nbuf Allocation failed for Mgmt. payload");
2156 		qdf_assert(0);
2157 		return;
2158 	}
2159 
2160 	qdf_nbuf_put_tail(nbuf, payload_size);
2161 	qdf_mem_copy(qdf_nbuf_data(nbuf), dp_stats_buf->payload, payload_size);
2162 
2163 	dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
2164 		nbuf, HTT_INVALID_PEER,
2165 		WDI_NO_VAL, pdev->pdev_id);
2166 }
2167 
2168 /**
2169  * dp_process_ppdu_tag(): Function to process the PPDU TLVs
 * @pdev: DP Physical device (radio) handle
 * @tag_buf: TLV buffer
 * @tlv_len: length of the TLV including its header
2172  *
2173  * return: void
2174  */
static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
		uint32_t tlv_len)
{
	/* the first word of every TLV carries its tag/type */
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
	/*
	 * Dispatch to the per-TLV parser. tlv_len already includes the
	 * TLV header (the caller adds HTT_TLV_HDR_LEN), so each assert
	 * checks the full structure size; a mismatch indicates host/FW
	 * TLV definition skew.
	 */
	switch (tlv_type) {
	case HTT_PPDU_STATS_COMMON_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_common_tlv));
		dp_process_ppdu_stats_common_tlv(pdev, tag_buf);
		break;
	case HTT_PPDU_STATS_USR_COMMON_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_user_common_tlv));
		dp_process_ppdu_stats_user_common_tlv(pdev, tag_buf);
		break;
	case HTT_PPDU_STATS_USR_RATE_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_user_rate_tlv));
		dp_process_ppdu_stats_user_rate_tlv(pdev, tag_buf);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv));
		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(pdev, tag_buf);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv));
		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(pdev, tag_buf);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
		qdf_assert_always(tlv_len ==
				sizeof(htt_ppdu_stats_user_cmpltn_common_tlv));
		dp_process_ppdu_stats_user_cmpltn_common_tlv(pdev, tag_buf);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
		qdf_assert_always(tlv_len ==
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv));
		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(pdev,
								tag_buf);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
		qdf_assert_always(tlv_len ==
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv));
		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(pdev,
								tag_buf);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
		qdf_assert_always(tlv_len ==
			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv));
		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(pdev,
								tag_buf);
		break;
	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
		qdf_assert_always(tlv_len ==
			sizeof(htt_ppdu_stats_usr_common_array_tlv_v));
		dp_process_ppdu_stats_user_common_array_tlv(pdev,
							tag_buf);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
		qdf_assert_always(tlv_len ==
			sizeof(htt_ppdu_stats_flush_tlv));
		dp_process_ppdu_stats_user_compltn_flush_tlv(pdev,
								tag_buf);
		break;
	case HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV:
		/* variable-length payload: no size assert, pass tlv_len on */
		dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(pdev,
							tag_buf, tlv_len);
		break;
	default:
		/* unknown/unhandled TLV types are silently skipped */
		break;
	}
}
2248 
/*
 * dp_htt_process_tlv() - walk the TLV stream of a PPDU_STATS_IND message
 * @pdev: DP PDEV handle
 * @htt_t2h_msg: T2H message nbuf holding the TLV stream
 *
 * Return: QDF_STATUS_SUCCESS once a TLV indicating PPDU completion has
 *         been seen; QDF_STATUS_E_PENDING otherwise.
 */
static QDF_STATUS dp_htt_process_tlv(struct dp_pdev *pdev,
		qdf_nbuf_t htt_t2h_msg)
{
	uint32_t length;
	uint32_t ppdu_id;
	uint8_t tlv_type;
	uint32_t tlv_length;
	uint8_t *tlv_buf;
	QDF_STATUS status = QDF_STATUS_E_PENDING;

	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);

	/* word 0: total payload size of the TLV stream */
	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);

	/* word 1: PPDU id this stream belongs to */
	msg_word = msg_word + 1;
	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);

	/* TLVs start at word 4 of the message */
	msg_word = msg_word + 3;
	while (length > 0) {
		tlv_buf = (uint8_t *)msg_word;
		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
			pdev->stats.ppdu_stats_counter[tlv_type]++;

		/* a zero-length TLV terminates the walk */
		if (tlv_length == 0)
			break;

		if (tlv_type == HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)
			status = QDF_STATUS_SUCCESS;

		/*
		 * An ACK/BA status TLV for a new ppdu_id also marks the
		 * previous PPDU as complete.
		 */
		if ((tlv_type == HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
				(pdev->last_ppdu_id != ppdu_id))
			status = QDF_STATUS_SUCCESS;

		/* advance past the TLV header plus its payload */
		tlv_length += HTT_TLV_HDR_LEN;
		dp_process_ppdu_tag(pdev, msg_word, tlv_length);


		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
		length -= (tlv_length);
	}
	pdev->last_ppdu_id = ppdu_id;
	return status;
}
2294 #endif /* FEATURE_PERPKT_INFO */
2295 
2296 /**
2297  * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
2298  * @soc: DP SOC handle
2299  * @pdev_id: pdev id
2300  * @htt_t2h_msg: HTT message nbuf
2301  *
2302  * return:void
2303  */
2304 #if defined(WDI_EVENT_ENABLE)
2305 #ifdef FEATURE_PERPKT_INFO
static void dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
		uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
	struct dp_peer *peer;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	int status;
	int i;

	/* nothing to do unless some PPDU-stats consumer is enabled */
	if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
			!pdev->mcopy_mode)
		return;

	/* lazily allocate the descriptor buffer the TLV parsers fill in */
	if (!pdev->tx_ppdu_info.buf) {
		/*
		 * Todo: For MU/OFDMA, we need to account for multiple user
		 * descriptors in a PPDU, in skb size.
		 * The allocation has to be moved to ppdu_cmn tlv processing
		 */
		pdev->tx_ppdu_info.buf = qdf_nbuf_alloc(soc->osdev,
				sizeof(struct cdp_tx_completion_ppdu), 0, 4,
				TRUE);

		if (!pdev->tx_ppdu_info.buf) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
					"Nbuf Allocation failed for HTT PPDU");
			return;
		}

		qdf_mem_zero(qdf_nbuf_data(pdev->tx_ppdu_info.buf),
				sizeof(struct cdp_tx_completion_ppdu));

		if (qdf_nbuf_put_tail(pdev->tx_ppdu_info.buf,
			sizeof(struct cdp_tx_completion_ppdu)) == NULL)	{
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
					"No tailroom for HTT PPDU");
			qdf_nbuf_free(pdev->tx_ppdu_info.buf);
			pdev->tx_ppdu_info.buf = NULL;
			pdev->tx_ppdu_info.last_user = 0;
			return;
		}

	}

	/* SUCCESS means the PPDU is complete and ready to dispatch */
	status = dp_htt_process_tlv(pdev, htt_t2h_msg);

	if (status == QDF_STATUS_SUCCESS) {
		ppdu_desc = (struct cdp_tx_completion_ppdu *)
			qdf_nbuf_data(pdev->tx_ppdu_info.buf);

		ppdu_desc->num_users = pdev->tx_ppdu_info.last_user;

		/* aggregate per-user counts and update per-peer tx stats */
		for (i = 0; i < ppdu_desc->num_users; i++) {
			peer = dp_peer_find_by_id(soc,
					ppdu_desc->user[i].peer_id);
			if (!peer)
				continue;

			ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
			ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;

			/* only data TIDs feed the per-peer tx statistics */
			if (ppdu_desc->user[i].tid < CDP_DATA_TID_MAX) {
				dp_tx_stats_update(soc, peer,
						&ppdu_desc->user[i],
						ppdu_desc->ack_rssi);
			}
		}

		/*
		 * NOTE(review): the buffer reference is relinquished here —
		 * presumably a WDI subscriber takes ownership and frees it;
		 * confirm against dp_wdi_event_handler semantics.
		 */
		dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, soc,
				pdev->tx_ppdu_info.buf, HTT_INVALID_PEER,
				WDI_NO_VAL, pdev_id);

		pdev->tx_ppdu_info.buf = NULL;
		pdev->tx_ppdu_info.last_user = 0;
	}

}
2383 #else
static void dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
		uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	/* stub: PPDU stats processing requires FEATURE_PERPKT_INFO */
}
2389 #endif
2390 #endif
2391 
2392 /**
2393  * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
2394  * @soc: DP SOC handle
2395  * @htt_t2h_msg: HTT message nbuf
2396  *
2397  * return:void
2398  */
2399 static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
2400 		qdf_nbuf_t htt_t2h_msg)
2401 {
2402 	uint8_t done;
2403 	qdf_nbuf_t msg_copy;
2404 	uint32_t *msg_word;
2405 
2406 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
2407 	msg_word = msg_word + 3;
2408 	done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
2409 
2410 	/*
2411 	 * HTT EXT stats response comes as stream of TLVs which span over
2412 	 * multiple T2H messages.
2413 	 * The first message will carry length of the response.
2414 	 * For rest of the messages length will be zero.
2415 	 *
2416 	 * Clone the T2H message buffer and store it in a list to process
2417 	 * it later.
2418 	 *
2419 	 * The original T2H message buffers gets freed in the T2H HTT event
2420 	 * handler
2421 	 */
2422 	msg_copy = qdf_nbuf_clone(htt_t2h_msg);
2423 
2424 	if (!msg_copy) {
2425 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2426 				"T2H messge clone failed for HTT EXT STATS");
2427 		goto error;
2428 	}
2429 
2430 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2431 	qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
2432 	/*
2433 	 * Done bit signifies that this is the last T2H buffer in the stream of
2434 	 * HTT EXT STATS message
2435 	 */
2436 	if (done) {
2437 		soc->htt_stats.num_stats++;
2438 		qdf_sched_work(0, &soc->htt_stats.work);
2439 	}
2440 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2441 
2442 	return;
2443 
2444 error:
2445 	qdf_spin_lock_bh(&soc->htt_stats.lock);
2446 	while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
2447 			!= NULL) {
2448 		qdf_nbuf_free(msg_copy);
2449 	}
2450 	soc->htt_stats.num_stats = 0;
2451 	qdf_spin_unlock_bh(&soc->htt_stats.lock);
2452 	return;
2453 
2454 }
2455 
2456 /*
2457  * htt_soc_attach_target() - SOC level HTT setup
2458  * @htt_soc:	HTT SOC handle
2459  *
2460  * Return: 0 on success; error code on failure
2461  */
int htt_soc_attach_target(void *htt_soc)
{
	/* kick off the HTT version request/response handshake with FW */
	return htt_h2t_ver_req_msg((struct htt_soc *)htt_soc);
}
2468 
2469 
2470 #if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
2471 /*
2472  * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
2473  * @htt_soc:	 HTT SOC handle
2474  * @msg_word:    Pointer to payload
2475  * @htt_t2h_msg: HTT msg nbuf
2476  *
2477  * Return: None
2478  */
2479 static void
2480 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
2481 				uint32_t *msg_word,
2482 				qdf_nbuf_t htt_t2h_msg)
2483 {
2484 	u_int8_t pdev_id;
2485 	qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
2486 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2487 		"received HTT_T2H_MSG_TYPE_PPDU_STATS_IND\n");
2488 	pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
2489 	pdev_id = DP_HW2SW_MACID(pdev_id);
2490 	dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
2491 				  htt_t2h_msg);
2492 	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
2493 		htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
2494 		pdev_id);
2495 }
2496 #else
2497 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
2498 				qdf_nbuf_t htt_t2h_msg)
2499 {
2500 }
2501 #endif
2502 
2503 #if defined(WDI_EVENT_ENABLE) && \
2504 		!defined(REMOVE_PKT_LOG) && defined(CONFIG_WIN)
2505 /*
2506  * dp_pktlog_msg_handler() - Pktlog msg handler
2507  * @htt_soc:	 HTT SOC handle
2508  * @msg_word:    Pointer to payload
2509  *
2510  * Return: None
2511  */
2512 static void
2513 dp_pktlog_msg_handler(struct htt_soc *soc,
2514 				uint32_t *msg_word)
2515 {
2516 	uint8_t pdev_id;
2517 	uint32_t *pl_hdr;
2518 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2519 		"received HTT_T2H_MSG_TYPE_PKTLOG\n");
2520 	pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
2521 	pdev_id = DP_HW2SW_MACID(pdev_id);
2522 	pl_hdr = (msg_word + 1);
2523 	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
2524 		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
2525 		pdev_id);
2526 }
2527 #else
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
				uint32_t *msg_word)
{
	/* stub: pktlog support compiled out */
}
2533 #endif
2534 
2535 /*
2536  * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
2537  * @context:	Opaque context (HTT SOC handle)
2538  * @pkt:	HTC packet
2539  */
static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
	struct htt_soc *soc = (struct htt_soc *) context;
	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
	u_int32_t *msg_word;
	enum htt_t2h_msg_type msg_type;

	/* check for successful message reception */
	if (pkt->Status != QDF_STATUS_SUCCESS) {
		/* cancellations are expected; only count real HTC errors */
		if (pkt->Status != QDF_STATUS_E_CANCELED)
			soc->stats.htc_err_cnt++;

		qdf_nbuf_free(htt_t2h_msg);
		return;
	}

	/* TODO: Check if we should pop the HTC/HTT header alignment padding */

	/* dispatch on the message type carried in the first word */
	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
	switch (msg_type) {
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		{
			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
			u_int8_t *peer_mac_addr;
			u_int16_t peer_id;
			u_int16_t hw_peer_id;
			u_int8_t vdev_id;

			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
			hw_peer_id =
				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
			/* undo the byte swizzling FW applies to the mac */
			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
				(u_int8_t *) (msg_word+1),
				&mac_addr_deswizzle_buf[0]);
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_INFO,
				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
				peer_id, vdev_id);

			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
						vdev_id, peer_mac_addr);
			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
		{
			u_int16_t peer_id;
			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);

			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id);
			break;
		}
	case HTT_T2H_MSG_TYPE_SEC_IND:
		{
			u_int16_t peer_id;
			enum htt_sec_type sec_type;
			int is_unicast;

			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
			/* point to the first part of the Michael key */
			msg_word++;
			dp_rx_sec_ind_handler(
				soc->dp_soc, peer_id, sec_type, is_unicast,
				msg_word, msg_word + 2);
			break;
		}

	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		{
			dp_ppdu_stats_ind_handler(soc, msg_word, htt_t2h_msg);
			break;
		}

	case HTT_T2H_MSG_TYPE_PKTLOG:
		{
			dp_pktlog_msg_handler(soc, msg_word);
			break;
		}

	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		{
			/* release the runtime-PM reference held for the
			 * version request/response exchange
			 */
			htc_pm_runtime_put(soc->htc_soc);
			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
				"target uses HTT version %d.%d; host uses %d.%d\n",
				soc->tgt_ver.major, soc->tgt_ver.minor,
				HTT_CURRENT_VERSION_MAJOR,
				HTT_CURRENT_VERSION_MINOR);
			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"*** Incompatible host/target HTT versions!\n");
			}
			/* abort if the target is incompatible with the host */
			qdf_assert(soc->tgt_ver.major ==
				HTT_CURRENT_VERSION_MAJOR);
			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_WARN,
					"*** Warning: host/target HTT versions"
					" are different, though compatible!\n");
			}
			break;
		}
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint8_t win_sz;
			uint16_t status;
			struct dp_peer *peer;

			/*
			 * Update REO Queue Desc with new values
			 */
			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
			peer = dp_peer_find_by_id(soc->dp_soc, peer_id);

			/*
			 * Window size needs to be incremented by 1
			 * since fw needs to represent a value of 256
			 * using just 8 bits
			 */
			if (peer) {
				status = dp_addba_requestprocess_wifi3(peer,
						0, tid, 0, win_sz + 1, 0xffff);
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_INFO,
					FL("PeerID %d BAW %d TID %d stat %d\n"),
					peer_id, win_sz, tid, status);

			} else {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					FL("Peer not found peer id %d\n"),
					peer_id);
			}
			break;
		}
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		{
			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
			break;
		}
	default:
		break;
	};

	/* Free the indication buffer */
	qdf_nbuf_free(htt_t2h_msg);
}
2697 
2698 /*
2699  * dp_htt_h2t_full() - Send full handler (called from HTC)
2700  * @context:	Opaque context (HTT SOC handle)
2701  * @pkt:	HTC packet
2702  *
2703  * Return: enum htc_send_full_action
2704  */
static enum htc_send_full_action
dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
{
	/* never drop H2T messages on queue-full; let HTC keep queueing */
	return HTC_SEND_FULL_KEEP;
}
2710 
2711 /*
2712  * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
2713  * @context:	Opaque context (HTT SOC handle)
2714  * @nbuf:	nbuf containing T2H message
2715  * @pipe_id:	HIF pipe ID
2716  *
2717  * Return: QDF_STATUS
2718  *
2719  * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
2720  * will be used for packet log and other high-priority HTT messsages. Proper
2721  * HTC connection to be added later once required FW changes are available
2722  */
2723 static QDF_STATUS
2724 dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
2725 {
2726 	A_STATUS rc = QDF_STATUS_SUCCESS;
2727 	HTC_PACKET htc_pkt;
2728 
2729 	qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
2730 	qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
2731 	htc_pkt.Status = QDF_STATUS_SUCCESS;
2732 	htc_pkt.pPktContext = (void *)nbuf;
2733 	dp_htt_t2h_msg_handler(context, &htc_pkt);
2734 
2735 	return rc;
2736 }
2737 
2738 /*
2739  * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
2740  * @htt_soc:	HTT SOC handle
2741  *
2742  * Return: 0 on success; error code on failure
2743  */
static int
htt_htc_soc_attach(struct htt_soc *soc)
{
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp response;
	A_STATUS status;
	struct dp_soc *dpsoc = soc->dp_soc;

	qdf_mem_set(&connect, sizeof(connect), 0);
	qdf_mem_set(&response, sizeof(response), 0);

	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	connect.EpCallbacks.pContext = soc;
	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;

	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
	connect.EpCallbacks.EpRecvRefill = NULL;

	/* N/A, fill is done by HIF */
	connect.EpCallbacks.RecvRefillWaterMark = 1;

	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
	/*
	 * Specify how deep to let a queue get before htc_send_pkt will
	 * call the EpSendFull function due to excessive send queue depth.
	 */
	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;

	/* disable flow control for HTT data message service */
	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;

	/* connect to control service */
	connect.service_id = HTT_DATA_MSG_SVC;

	status = htc_connect_service(soc->htc_soc, &connect, &response);

	if (status != A_OK)
		return QDF_STATUS_E_FAILURE;

	/* remember the endpoint HTC assigned for H2T sends */
	soc->htc_endpoint = response.Endpoint;

	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
	/* register the out-of-band high-priority T2H pipe callback */
	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);

	return 0; /* success */
}
2794 
2795 /*
2796  * htt_soc_attach() - SOC level HTT initialization
2797  * @dp_soc:	Opaque Data path SOC handle
2798  * @ctrl_psoc:	Opaque ctrl SOC handle
2799  * @htc_soc:	SOC level HTC handle
2800  * @hal_soc:	Opaque HAL SOC handle
2801  * @osdev:	QDF device
2802  *
2803  * Return: HTT handle on success; NULL on failure
2804  */
2805 void *
2806 htt_soc_attach(void *dp_soc, void *ctrl_psoc, HTC_HANDLE htc_soc,
2807 	void *hal_soc, qdf_device_t osdev)
2808 {
2809 	struct htt_soc *soc;
2810 	int i;
2811 
2812 	soc = qdf_mem_malloc(sizeof(*soc));
2813 
2814 	if (!soc)
2815 		goto fail1;
2816 
2817 	soc->osdev = osdev;
2818 	soc->ctrl_psoc = ctrl_psoc;
2819 	soc->dp_soc = dp_soc;
2820 	soc->htc_soc = htc_soc;
2821 	soc->hal_soc = hal_soc;
2822 
2823 	/* TODO: See if any NSS related context is requred in htt_soc */
2824 
2825 	soc->htt_htc_pkt_freelist = NULL;
2826 
2827 	if (htt_htc_soc_attach(soc))
2828 		goto fail2;
2829 
2830 	/* TODO: See if any Rx data specific intialization is required. For
2831 	 * MCL use cases, the data will be received as single packet and
2832 	 * should not required any descriptor or reorder handling
2833 	 */
2834 
2835 	HTT_TX_MUTEX_INIT(&soc->htt_tx_mutex);
2836 
2837 	/* pre-allocate some HTC_PACKET objects */
2838 	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
2839 		struct dp_htt_htc_pkt_union *pkt;
2840 		pkt = qdf_mem_malloc(sizeof(*pkt));
2841 		if (!pkt)
2842 			break;
2843 
2844 		htt_htc_pkt_free(soc, &pkt->u.pkt);
2845 	}
2846 
2847 	return soc;
2848 
2849 fail2:
2850 	qdf_mem_free(soc);
2851 
2852 fail1:
2853 	return NULL;
2854 }
2855 
2856 
2857 /*
2858  * htt_soc_detach() - Detach SOC level HTT
2859  * @htt_soc:	HTT SOC handle
2860  */
2861 void
2862 htt_soc_detach(void *htt_soc)
2863 {
2864 	struct htt_soc *soc = (struct htt_soc *)htt_soc;
2865 
2866 	htt_htc_misc_pkt_pool_free(soc);
2867 	htt_htc_pkt_pool_free(soc);
2868 	HTT_TX_MUTEX_DESTROY(&soc->htt_tx_mutex);
2869 	qdf_mem_free(soc);
2870 }
2871 
2872 /*
2873  * dp_get_pdev_mask_for_channel_id() - Retrieve pdev_id mask based on channel
2874  * information
2875  * @pdev - DP PDEV Handle
2876  * @channel - frequency
2877  *
2878  * Return - Pdev_id mask
2879  */
2880 static inline
2881 uint8_t dp_get_pdev_mask_for_channel_id(struct dp_pdev *pdev, uint8_t channel)
2882 {
2883 	uint8_t pdev_mask = 0;
2884 
2885 	if (!channel)
2886 		return 1 << (pdev->pdev_id + 1);
2887 
2888 	else if (channel && WLAN_CHAN_IS_5GHZ(channel))
2889 		pdev_mask = 0;
2890 
2891 	else if (channel && WLAN_CHAN_IS_2GHZ(channel))
2892 		pdev_mask = 1;
2893 
2894 	return 1 << (pdev_mask + 1);
2895 }
2896 
2897 /**
2898  * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW
2899  * @pdev: DP PDEV handle
2900  * @stats_type_upload_mask: stats type requested by user
2901  * @config_param_0: extra configuration parameters
2902  * @config_param_1: extra configuration parameters
2903  * @config_param_2: extra configuration parameters
2904  * @config_param_3: extra configuration parameters
2905  *
2906  * return: QDF STATUS
2907  */
2908 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
2909 		uint32_t stats_type_upload_mask, uint32_t config_param_0,
2910 		uint32_t config_param_1, uint32_t config_param_2,
2911 		uint32_t config_param_3, int cookie_val, int cookie_msb,
2912 		uint8_t channel)
2913 {
2914 	struct htt_soc *soc = pdev->soc->htt_handle;
2915 	struct dp_htt_htc_pkt *pkt;
2916 	qdf_nbuf_t msg;
2917 	uint32_t *msg_word;
2918 	uint8_t pdev_mask = 0;
2919 
2920 	msg = qdf_nbuf_alloc(
2921 			soc->osdev,
2922 			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
2923 			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
2924 
2925 	if (!msg)
2926 		return QDF_STATUS_E_NOMEM;
2927 
2928 	/*TODO:Add support for SOC stats
2929 	 * Bit 0: SOC Stats
2930 	 * Bit 1: Pdev stats for pdev id 0
2931 	 * Bit 2: Pdev stats for pdev id 1
2932 	 * Bit 3: Pdev stats for pdev id 2
2933 	 */
2934 	pdev_mask = dp_get_pdev_mask_for_channel_id(pdev, channel);
2935 
2936 	/*
2937 	 * Set the length of the message.
2938 	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
2939 	 * separately during the below call to qdf_nbuf_push_head.
2940 	 * The contribution from the HTC header is added separately inside HTC.
2941 	 */
2942 	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
2943 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2944 				"Failed to expand head for HTT_EXT_STATS");
2945 		qdf_nbuf_free(msg);
2946 		return QDF_STATUS_E_FAILURE;
2947 	}
2948 
2949 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2950 		"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
2951 		"config_param_1 %u\n config_param_2 %u\n"
2952 		"config_param_4 %u\n -------------\n",
2953 		__func__, __LINE__, cookie_val, config_param_0,
2954 		config_param_1, config_param_2,	config_param_3);
2955 
2956 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
2957 
2958 	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
2959 	*msg_word = 0;
2960 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
2961 	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
2962 	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
2963 
2964 	/* word 1 */
2965 	msg_word++;
2966 	*msg_word = 0;
2967 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
2968 
2969 	/* word 2 */
2970 	msg_word++;
2971 	*msg_word = 0;
2972 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
2973 
2974 	/* word 3 */
2975 	msg_word++;
2976 	*msg_word = 0;
2977 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
2978 
2979 	/* word 4 */
2980 	msg_word++;
2981 	*msg_word = 0;
2982 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
2983 
2984 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
2985 
2986 	/* word 5 */
2987 	msg_word++;
2988 
2989 	/* word 6 */
2990 	msg_word++;
2991 	*msg_word = 0;
2992 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
2993 
2994 	/* word 7 */
2995 	msg_word++;
2996 	*msg_word = 0;
2997 	/*Using last 2 bits for pdev_id */
2998 	cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
2999 	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
3000 
3001 	pkt = htt_htc_pkt_alloc(soc);
3002 	if (!pkt) {
3003 		qdf_nbuf_free(msg);
3004 		return QDF_STATUS_E_NOMEM;
3005 	}
3006 
3007 	pkt->soc_ctxt = NULL; /* not used during send-done callback */
3008 
3009 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
3010 			dp_htt_h2t_send_complete_free_netbuf,
3011 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
3012 			soc->htc_endpoint,
3013 			1); /* tag - not relevant here */
3014 
3015 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
3016 	DP_HTT_SEND_HTC_PKT(soc, pkt);
3017 	return 0;
3018 }
3019 
/* Remove this workaround once HTT_H2T_MSG_TYPE_PPDU_STATS_CFG is properly
 * defined in the HTT header (htt.h).
 */
3023 #if defined(WDI_EVENT_ENABLE)
3024 /**
3025  * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
3026  * @pdev: DP PDEV handle
3027  * @stats_type_upload_mask: stats type requested by user
3028  * @mac_id: Mac id number
3029  *
3030  * return: QDF STATUS
3031  */
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask;

	/* Allocate an nbuf large enough for the fixed-size PPDU stats cfg
	 * message plus the HTC header and alignment padding.
	 */
	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer\n");
		qdf_assert(0);
		return QDF_STATUS_E_NOMEM;
	}

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	/* Target only the pdev owning the HW MAC mapped to this SW mac_id */
	pdev_mask = 1 << DP_SW2HW_MACID(mac_id);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Failed to expand head for HTT_CFG_STATS\n");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* Capture the HTT message start before the padding is prepended;
	 * msg_word keeps pointing at HTT word 0 after the push below.
	 */
	msg_word = (uint32_t *) qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	/* word 0: msg type, destination pdev mask and TLV bitmap */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
			stats_type_upload_mask);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Fail to allocate dp_htt_htc_pkt buffer\n");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			soc->htc_endpoint,
			1); /* tag - not relevant here */

	/* On successful HTC submission the pkt is tracked on the misc list
	 * and the send-done callback frees the netbuf.
	 */
	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	DP_HTT_SEND_HTC_PKT(soc, pkt);
	return 0;
}
3104 #endif
3105