xref: /wlan-dirver/qcacld-3.0/core/dp/htt/htt_t2h.c (revision 63d7e2a202b3cd37d6c1c20a39582b297a267b6b)
1 /*
2  * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * @file htt_t2h.c
21  * @brief Provide functions to process target->host HTT messages.
22  * @details
23  *  This file contains functions related to target->host HTT messages.
24  *  There are two categories of functions:
25  *  1.  A function that receives an HTT message from HTC and dispatches it
26  *      based on the HTT message type.
27  *  2.  Functions that provide the info elements from specific HTT messages.
28  */
29 #include <wma.h>
30 #include <htc_api.h>            /* HTC_PACKET */
31 #include <htt.h>                /* HTT_T2H_MSG_TYPE, etc. */
32 #include <qdf_nbuf.h>           /* qdf_nbuf_t */
33 
34 #include <ol_rx.h>
35 #include <ol_htt_rx_api.h>
36 #include <ol_htt_tx_api.h>
37 #include <ol_txrx_htt_api.h>    /* htt_tx_status */
38 
39 #include <htt_internal.h>       /* HTT_TX_SCHED, etc. */
40 #include <pktlog_ac_fmt.h>
41 #include <wdi_event.h>
43 #include <ol_txrx_peer_find.h>
44 #include <cdp_txrx_ipa.h>
45 #include "pktlog_ac.h"
46 #include <cdp_txrx_handle.h>
47 #include <wlan_pkt_capture_ucfg_api.h>
48 #include <ol_txrx.h>
49 /*--- target->host HTT message dispatch function ----------------------------*/
50 
51 #ifndef DEBUG_CREDIT
52 #define DEBUG_CREDIT 0
53 #endif
54 
55 #if defined(CONFIG_HL_SUPPORT)
56 
57 
58 
59 /**
60  * htt_rx_frag_set_last_msdu() - set last msdu bit in rx descriptor
61  *				 for received frames
62  * @pdev: Handle (pointer) to HTT pdev.
63  * @msg: htt received msg
64  *
65  * Return: None
66  */
67 static inline
68 void htt_rx_frag_set_last_msdu(struct htt_pdev_t *pdev, qdf_nbuf_t msg)
69 {
70 }
71 #else
72 
73 static void htt_rx_frag_set_last_msdu(struct htt_pdev_t *pdev, qdf_nbuf_t msg)
74 {
75 	uint32_t *msg_word;
76 	unsigned int num_msdu_bytes;
77 	qdf_nbuf_t msdu;
78 	struct htt_host_rx_desc_base *rx_desc;
79 	int start_idx;
80 	uint8_t *p_fw_msdu_rx_desc = 0;
81 
82 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
83 	num_msdu_bytes = HTT_RX_FRAG_IND_FW_RX_DESC_BYTES_GET(
84 		*(msg_word + HTT_RX_FRAG_IND_HDR_PREFIX_SIZE32));
85 	/*
86 	 * 1 word for the message header,
87 	 * 1 word to specify the number of MSDU bytes,
88 	 * 1 word for every 4 MSDU bytes (round up),
89 	 * 1 word for the MPDU range header
90 	 */
91 	pdev->rx_mpdu_range_offset_words = 3 + ((num_msdu_bytes + 3) >> 2);
92 	pdev->rx_ind_msdu_byte_idx = 0;
93 
94 	p_fw_msdu_rx_desc = ((uint8_t *) (msg_word) +
95 			     HTT_ENDIAN_BYTE_IDX_SWAP
96 				     (HTT_RX_FRAG_IND_FW_DESC_BYTE_OFFSET));
97 
98 	/*
99 	 * Fix for EV126710, in which BSOD occurs due to last_msdu bit
100 	 * not set while the next pointer is deliberately set to NULL
101 	 * before calling ol_rx_pn_check_base()
102 	 *
103 	 * For fragment frames, the HW may not have set the last_msdu bit
104 	 * in the rx descriptor, but the SW expects this flag to be set,
105 	 * since each fragment is in a separate MPDU. Thus, set the flag here,
106 	 * just in case the HW didn't.
107 	 */
108 	start_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
109 	msdu = pdev->rx_ring.buf.netbufs_ring[start_idx];
110 	qdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
111 	qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
112 	rx_desc = htt_rx_desc(msdu);
113 	*((uint8_t *) &rx_desc->fw_desc.u.val) = *p_fw_msdu_rx_desc;
114 	rx_desc->msdu_end.last_msdu = 1;
115 	qdf_nbuf_map(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
116 }
117 #endif
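
/*
 * Worked example of the offset calculation above (illustrative only,
 * not compiled): the fixed part of the fragment indication occupies
 * 3 words (message header, MSDU byte count, MPDU range header), and the
 * per-MSDU FW rx descriptor bytes are rounded up to whole 32-bit words.
 * With num_msdu_bytes = 6: 3 + ((6 + 3) >> 2) = 3 + 2 = 5 words.
 */
#if 0
static unsigned int example_frag_ind_mpdu_range_offset_words(
	unsigned int num_msdu_bytes)
{
	unsigned int fixed_words = 3;	/* msg hdr + byte count + range hdr */

	return fixed_words + ((num_msdu_bytes + 3) >> 2);
}
#endif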
118 
119 static uint8_t *htt_t2h_mac_addr_deswizzle(uint8_t *tgt_mac_addr,
120 					   uint8_t *buffer)
121 {
122 #ifdef BIG_ENDIAN_HOST
123 	/*
124 	 * The host endianness is opposite of the target endianness.
125 	 * To make uint32_t elements come out correctly, the target->host
126 	 * upload has swizzled the bytes in each uint32_t element of the
127 	 * message.
128 	 * For byte-array message fields like the MAC address, this
129 	 * upload swizzling puts the bytes in the wrong order, and needs
130 	 * to be undone.
131 	 */
132 	buffer[0] = tgt_mac_addr[3];
133 	buffer[1] = tgt_mac_addr[2];
134 	buffer[2] = tgt_mac_addr[1];
135 	buffer[3] = tgt_mac_addr[0];
136 	buffer[4] = tgt_mac_addr[7];
137 	buffer[5] = tgt_mac_addr[6];
138 	return buffer;
139 #else
140 	/*
141 	 * The host endianness matches the target endianness -
142 	 * we can use the mac addr directly from the message buffer.
143 	 */
144 	return tgt_mac_addr;
145 #endif
146 }
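
/*
 * Worked example (illustrative only, not compiled): suppose the target
 * wrote the MAC address 00:11:22:33:44:55 into the first six bytes of
 * two consecutive dwords.  On a big-endian host the per-dword upload
 * swap delivers the bytes as 33:22:11:00:xx:xx:55:44, and the
 * deswizzle above restores the original order.
 */
#if 0
static void example_mac_deswizzle(void)
{
	/* byte stream as seen by a big-endian host after the dword swap */
	uint8_t wire[8] = { 0x33, 0x22, 0x11, 0x00, 0xff, 0xff, 0x55, 0x44 };
	uint8_t mac[6];

	mac[0] = wire[3];	/* 0x00 */
	mac[1] = wire[2];	/* 0x11 */
	mac[2] = wire[1];	/* 0x22 */
	mac[3] = wire[0];	/* 0x33 */
	mac[4] = wire[7];	/* 0x44 */
	mac[5] = wire[6];	/* 0x55 */
}
#endif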
147 
148 /**
149  * htt_ipa_op_response() - handle a WDI IPA op response message from FW
150  * @pdev: Handle (pointer) to HTT pdev.
151  * @msg_word: htt msg
152  *
153  * Return: None
154  */
155 #ifdef IPA_OFFLOAD
156 static void htt_ipa_op_response(struct htt_pdev_t *pdev, uint32_t *msg_word)
157 {
158 	uint8_t op_code;
159 	uint16_t len;
160 	uint8_t *op_msg_buffer;
161 	uint8_t *msg_start_ptr;
162 
163 	htc_pm_runtime_put(pdev->htc_pdev);
164 	msg_start_ptr = (uint8_t *) msg_word;
165 	op_code =
166 		HTT_WDI_IPA_OP_RESPONSE_OP_CODE_GET(*msg_word);
167 	msg_word++;
168 	len = HTT_WDI_IPA_OP_RESPONSE_RSP_LEN_GET(*msg_word);
169 
170 	op_msg_buffer =
171 		qdf_mem_malloc(sizeof
172 				(struct htt_wdi_ipa_op_response_t) +
173 				len);
174 	if (!op_msg_buffer)
175 		return;
176 
177 	qdf_mem_copy(op_msg_buffer,
178 			msg_start_ptr,
179 			sizeof(struct htt_wdi_ipa_op_response_t) +
180 			len);
181 	cdp_ipa_op_response(cds_get_context(QDF_MODULE_ID_SOC),
182 			    OL_TXRX_PDEV_ID, op_msg_buffer);
183 }
184 #else
185 static void htt_ipa_op_response(struct htt_pdev_t *pdev, uint32_t *msg_word)
186 {
187 }
188 #endif
189 
190 #ifndef QCN7605_SUPPORT
191 static int htt_t2h_adjust_bus_target_delta(struct htt_pdev_t *pdev,
192 					   int32_t htt_credit_delta)
193 {
194 	if (pdev->cfg.is_high_latency && !pdev->cfg.default_tx_comp_req) {
195 		HTT_TX_MUTEX_ACQUIRE(&pdev->credit_mutex);
196 		qdf_atomic_add(htt_credit_delta,
197 			       &pdev->htt_tx_credit.target_delta);
198 		htt_credit_delta = htt_tx_credit_update(pdev);
199 		HTT_TX_MUTEX_RELEASE(&pdev->credit_mutex);
200 	}
201 	return htt_credit_delta;
202 }
203 #else
204 static int htt_t2h_adjust_bus_target_delta(struct htt_pdev_t *pdev,
205 					   int32_t htt_credit_delta)
206 {
207 	return htt_credit_delta;
208 }
209 #endif
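
/*
 * Minimal sketch (illustrative only, not compiled) of what the HL
 * branch above does, with a plain counter standing in for the
 * qdf_atomic target_delta and a callback standing in for
 * htt_tx_credit_update(): the raw delta reported by the target is
 * accumulated, and the value handed on to the tx layer is whatever the
 * update hook decides to release.
 */
#if 0
struct example_credit_state {
	int target_delta;	/* credits accumulated from the target */
};

static int
example_adjust_bus_target_delta(struct example_credit_state *cs,
				int raw_delta,
				int (*credit_update)(struct example_credit_state *cs))
{
	cs->target_delta += raw_delta;	/* fold in the raw report */
	return credit_update(cs);	/* release what the policy allows */
}
#endif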
210 
211 #define MAX_TARGET_TX_CREDIT    204800
212 #define HTT_CFR_DUMP_COMPL_HEAD_SZ	4
213 
214 /* Target to host msg/event handler for low priority messages */
215 static void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
216 				   bool free_msg_buf)
217 {
218 	struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
219 	uint32_t *msg_word;
220 	enum htt_t2h_msg_type msg_type;
221 
222 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
223 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
224 	switch (msg_type) {
225 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
226 	{
227 		htc_pm_runtime_put(pdev->htc_pdev);
228 		pdev->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
229 		pdev->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
230 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
231 			  "target uses HTT version %d.%d; host uses %d.%d",
232 			  pdev->tgt_ver.major, pdev->tgt_ver.minor,
233 			  HTT_CURRENT_VERSION_MAJOR,
234 			  HTT_CURRENT_VERSION_MINOR);
235 		if (pdev->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR)
236 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_WARN,
237 				  "*** Incompatible host/target HTT versions!");
238 		/* abort if the target is incompatible with the host */
239 		qdf_assert(pdev->tgt_ver.major ==
240 			   HTT_CURRENT_VERSION_MAJOR);
241 		if (pdev->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
242 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
243 				  "*** Warning: host/target HTT versions are ");
244 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
245 				  "different, though compatible!");
246 		}
247 		break;
248 	}
249 	case HTT_T2H_MSG_TYPE_RX_FLUSH:
250 	{
251 		uint16_t peer_id;
252 		uint8_t tid;
253 		uint16_t seq_num_start, seq_num_end;
254 		enum htt_rx_flush_action action;
255 
256 		if (qdf_nbuf_len(htt_t2h_msg) < HTT_RX_FLUSH_BYTES) {
257 			qdf_print("invalid nbuff len");
258 			WARN_ON(1);
259 			break;
260 		}
261 
262 		peer_id = HTT_RX_FLUSH_PEER_ID_GET(*msg_word);
263 		tid = HTT_RX_FLUSH_TID_GET(*msg_word);
264 		seq_num_start =
265 			HTT_RX_FLUSH_SEQ_NUM_START_GET(*(msg_word + 1));
266 		seq_num_end =
267 			HTT_RX_FLUSH_SEQ_NUM_END_GET(*(msg_word + 1));
268 		action =
269 			HTT_RX_FLUSH_MPDU_STATUS_GET(*(msg_word + 1)) ==
270 			1 ? htt_rx_flush_release : htt_rx_flush_discard;
271 		ol_rx_flush_handler(pdev->txrx_pdev, peer_id, tid,
272 				    seq_num_start, seq_num_end, action);
273 		break;
274 	}
275 	case HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND:
276 	{
277 		uint16_t msdu_cnt;
278 
279 		if (!pdev->cfg.is_high_latency &&
280 		    pdev->cfg.is_full_reorder_offload) {
281 			qdf_print("HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND not ");
282 			qdf_print("supported when full reorder offload is ");
283 			qdf_print("enabled in the configuration.\n");
284 			break;
285 		}
286 		msdu_cnt =
287 			HTT_RX_OFFLOAD_DELIVER_IND_MSDU_CNT_GET(*msg_word);
288 		ol_rx_offload_deliver_ind_handler(pdev->txrx_pdev,
289 						  htt_t2h_msg,
290 						  msdu_cnt);
291 		if (pdev->cfg.is_high_latency) {
292 			/*
293 			 * return here for HL to avoid double free on
294 			 * htt_t2h_msg
295 			 */
296 			return;
297 		}
298 		break;
299 	}
300 	case HTT_T2H_MSG_TYPE_RX_FRAG_IND:
301 	{
302 		uint16_t peer_id;
303 		uint8_t tid;
304 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
305 
306 		if (msg_len < HTT_RX_FRAG_IND_BYTES) {
307 			qdf_print("invalid nbuff len");
308 			WARN_ON(1);
309 			break;
310 		}
311 		peer_id = HTT_RX_FRAG_IND_PEER_ID_GET(*msg_word);
312 		tid = HTT_RX_FRAG_IND_EXT_TID_GET(*msg_word);
313 		htt_rx_frag_set_last_msdu(pdev, htt_t2h_msg);
314 
315 		/* If the packet length is invalid, this frame will be discarded. */
316 		if (pdev->cfg.is_high_latency) {
317 			u_int32_t rx_pkt_len = 0;
318 
319 			rx_pkt_len = qdf_nbuf_len(htt_t2h_msg);
320 
321 			if (rx_pkt_len < (HTT_RX_FRAG_IND_BYTES +
322 				sizeof(struct hl_htt_rx_ind_base)+
323 				sizeof(struct ieee80211_frame))) {
324 
325 				qdf_print("invalid packet len, %u", rx_pkt_len);
326 				/*
327 				 * This buf will be freed before
328 				 * exiting this function.
329 				 */
330 				break;
331 			}
332 		}
333 
334 		ol_rx_frag_indication_handler(pdev->txrx_pdev,
335 					      htt_t2h_msg,
336 					      peer_id, tid);
337 
338 		if (pdev->cfg.is_high_latency) {
339 			/*
340 			* For high latency solution,
341 			 * For the high latency solution, the
342 			 * HTT_T2H_MSG_TYPE_RX_FRAG_IND message and the RX
343 			 * packet share the same buffer.  The whole buffer is
344 			 * freed either by ol_rx_frag_indication_handler or
345 			 * by the upper layer, to avoid a double free.
346 			 */
348 		}
349 
350 		break;
351 	}
352 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
353 	{
354 		uint16_t peer_id;
355 		uint8_t tid;
356 		uint8_t win_sz;
357 		uint16_t start_seq_num;
358 
359 		/*
360 		 * FOR NOW, the host doesn't need to know the initial
361 		 * sequence number for rx aggregation.
362 		 * Thus, any value will do - specify 0.
363 		 */
364 		start_seq_num = 0;
365 		peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
366 		tid = HTT_RX_ADDBA_TID_GET(*msg_word);
367 		win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
368 		ol_rx_addba_handler(pdev->txrx_pdev, peer_id, tid,
369 				    win_sz, start_seq_num,
370 				    0 /* success */);
371 		break;
372 	}
373 	case HTT_T2H_MSG_TYPE_RX_DELBA:
374 	{
375 		uint16_t peer_id;
376 		uint8_t tid;
377 
378 		peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
379 		tid = HTT_RX_DELBA_TID_GET(*msg_word);
380 		ol_rx_delba_handler(pdev->txrx_pdev, peer_id, tid);
381 		break;
382 	}
383 	case HTT_T2H_MSG_TYPE_PEER_MAP:
384 	{
385 		uint8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
386 		uint8_t *peer_mac_addr;
387 		uint16_t peer_id;
388 		uint8_t vdev_id;
389 
390 		if (qdf_nbuf_len(htt_t2h_msg) < HTT_RX_PEER_MAP_BYTES) {
391 			qdf_print("invalid nbuff len");
392 			WARN_ON(1);
393 			break;
394 		}
395 
396 		peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
397 		vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
398 		peer_mac_addr = htt_t2h_mac_addr_deswizzle(
399 			(uint8_t *) (msg_word + 1),
400 			&mac_addr_deswizzle_buf[0]);
401 
402 		if (peer_id > ol_cfg_max_peer_id(pdev->ctrl_pdev)) {
403 			qdf_print("%s: HTT_T2H_MSG_TYPE_PEER_MAP,"
404 				"invalid peer_id, %u\n",
405 				__FUNCTION__,
406 				peer_id);
407 			break;
408 		}
409 
410 		ol_rx_peer_map_handler(pdev->txrx_pdev, peer_id,
411 				       vdev_id, peer_mac_addr,
412 				       1 /*can tx */);
413 		break;
414 	}
415 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
416 	{
417 		uint16_t peer_id;
418 
419 		if (qdf_nbuf_len(htt_t2h_msg) < HTT_RX_PEER_UNMAP_BYTES) {
420 			qdf_print("invalid nbuff len");
421 			WARN_ON(1);
422 			break;
423 		}
424 
425 		peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
426 		if (peer_id > ol_cfg_max_peer_id(pdev->ctrl_pdev)) {
427 			qdf_print("%s: HTT_T2H_MSG_TYPE_PEER_UNMAP,"
428 				"invalid peer_id, %u\n",
429 				__FUNCTION__,
430 				peer_id);
431 			break;
432 		}
433 
434 		ol_rx_peer_unmap_handler(pdev->txrx_pdev, peer_id);
435 		break;
436 	}
437 	case HTT_T2H_MSG_TYPE_SEC_IND:
438 	{
439 		uint16_t peer_id;
440 		enum htt_sec_type sec_type;
441 		int is_unicast;
442 
443 		if (qdf_nbuf_len(htt_t2h_msg) < HTT_SEC_IND_BYTES) {
444 			qdf_print("invalid nbuff len");
445 			WARN_ON(1);
446 			break;
447 		}
448 
449 		peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
450 		sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
451 		is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
452 		msg_word++;   /* point to the first part of the Michael key */
453 		ol_rx_sec_ind_handler(pdev->txrx_pdev, peer_id,
454 				      sec_type, is_unicast, msg_word,
455 				      msg_word + 2);
456 		break;
457 	}
458 	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND:
459 	{
460 		struct htt_mgmt_tx_compl_ind *compl_msg;
461 		int32_t credit_delta = 1;
462 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
463 		if (msg_len < (sizeof(struct htt_mgmt_tx_compl_ind) + sizeof(*msg_word))) {
464 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
465 				  "Invalid msg_word length in HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND");
466 			WARN_ON(1);
467 			break;
468 		}
469 
470 		compl_msg =
471 			(struct htt_mgmt_tx_compl_ind *)(msg_word + 1);
472 
473 		if (pdev->cfg.is_high_latency) {
474 			if (!pdev->cfg.default_tx_comp_req) {
475 				HTT_TX_MUTEX_ACQUIRE(&pdev->credit_mutex);
476 				qdf_atomic_add(credit_delta,
477 					       &pdev->htt_tx_credit.
478 								target_delta);
479 				credit_delta = htt_tx_credit_update(pdev);
480 				HTT_TX_MUTEX_RELEASE(&pdev->credit_mutex);
481 			}
482 			if (credit_delta)
483 				ol_tx_target_credit_update(
484 						pdev->txrx_pdev, credit_delta);
485 		}
486 		ol_tx_desc_update_group_credit(
487 			pdev->txrx_pdev, compl_msg->desc_id, 1,
488 			0, compl_msg->status);
489 
490 		DPTRACE(qdf_dp_trace_credit_record(QDF_TX_COMP, QDF_CREDIT_INC,
491 			1, qdf_atomic_read(&pdev->txrx_pdev->target_tx_credit),
492 			qdf_atomic_read(&pdev->txrx_pdev->txq_grps[0].credit),
493 			qdf_atomic_read(&pdev->txrx_pdev->txq_grps[1].credit)));
494 
495 		if (!ol_tx_get_is_mgmt_over_wmi_enabled()) {
496 			ol_tx_single_completion_handler(pdev->txrx_pdev,
497 							compl_msg->status,
498 							compl_msg->desc_id);
499 			htc_pm_runtime_put(pdev->htc_pdev);
500 			HTT_TX_SCHED(pdev);
501 		} else {
502 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
503 				  "Ignoring HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND indication");
504 		}
505 		break;
506 	}
507 	case HTT_T2H_MSG_TYPE_STATS_CONF:
508 	{
509 		uint8_t cookie;
510 		uint8_t *stats_info_list;
511 
512 		cookie = *(msg_word + 1);
513 
514 		stats_info_list = (uint8_t *) (msg_word + 3);
515 		htc_pm_runtime_put(pdev->htc_pdev);
516 		ol_txrx_fw_stats_handler(pdev->txrx_pdev, cookie,
517 					 stats_info_list);
518 		break;
519 	}
520 #ifndef REMOVE_PKT_LOG
521 	case HTT_T2H_MSG_TYPE_PKTLOG:
522 	{
523 		uint32_t len = qdf_nbuf_len(htt_t2h_msg);
524 
525 		if (len < sizeof(*msg_word) + sizeof(uint32_t)) {
526 			qdf_print("invalid nbuff len");
527 			WARN_ON(1);
528 			break;
529 		}
530 
531 		/* len is reduced by sizeof(*msg_word) */
532 		pktlog_process_fw_msg(OL_TXRX_PDEV_ID, msg_word + 1,
533 				      len - sizeof(*msg_word));
534 		break;
535 	}
536 #endif
537 	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
538 	{
539 		uint32_t htt_credit_delta_abs;
540 		int32_t htt_credit_delta;
541 		int sign, old_credit;
542 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
543 
544 		if (msg_len < HTT_TX_CREDIT_MSG_BYTES) {
545 			qdf_print("invalid nbuff len");
546 			WARN_ON(1);
547 			break;
548 		}
549 
550 		htt_credit_delta_abs =
551 			HTT_TX_CREDIT_DELTA_ABS_GET(*msg_word);
552 		sign = HTT_TX_CREDIT_SIGN_BIT_GET(*msg_word) ? -1 : 1;
553 		htt_credit_delta = sign * htt_credit_delta_abs;
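		/*
		 * Example: abs = 5 with the sign bit set decodes to a
		 * credit delta of -5 (credits reclaimed by the target);
		 * with the sign bit clear it decodes to +5.
		 */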
554 
555 		old_credit = qdf_atomic_read(&pdev->htt_tx_credit.target_delta);
556 		if (((old_credit + htt_credit_delta) > MAX_TARGET_TX_CREDIT) ||
557 			((old_credit + htt_credit_delta) < -MAX_TARGET_TX_CREDIT)) {
558 			qdf_err("invalid update,old_credit=%d, htt_credit_delta=%d",
559 				old_credit, htt_credit_delta);
560 			break;
561 		}
562 		htt_credit_delta =
563 		htt_t2h_adjust_bus_target_delta(pdev, htt_credit_delta);
564 		htt_tx_group_credit_process(pdev, msg_word);
565 		DPTRACE(qdf_dp_trace_credit_record(QDF_TX_CREDIT_UPDATE,
566 			QDF_CREDIT_INC,	htt_credit_delta,
567 			qdf_atomic_read(&pdev->txrx_pdev->target_tx_credit) +
568 			htt_credit_delta,
569 			qdf_atomic_read(&pdev->txrx_pdev->txq_grps[0].credit),
570 			qdf_atomic_read(&pdev->txrx_pdev->txq_grps[1].credit)));
571 
572 		ol_tx_credit_completion_handler(pdev->txrx_pdev,
573 						htt_credit_delta);
574 		break;
575 	}
576 
577 	case HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE:
578 	{
579 		uint16_t len;
580 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
581 		len = HTT_WDI_IPA_OP_RESPONSE_RSP_LEN_GET(*(msg_word + 1));
582 
583 		if (sizeof(struct htt_wdi_ipa_op_response_t) + len > msg_len) {
584 			qdf_print("Invalid buf len size %zu len %d, msg_len %d",
585 				  sizeof(struct htt_wdi_ipa_op_response_t),
586 				  len, msg_len);
587 			WARN_ON(1);
588 			break;
589 		}
590 		htt_ipa_op_response(pdev, msg_word);
591 		break;
592 	}
593 
594 	case HTT_T2H_MSG_TYPE_FLOW_POOL_MAP:
595 	{
596 		uint8_t num_flows;
597 		struct htt_flow_pool_map_payload_t *pool_map_payoad;
598 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
599 
600 		num_flows = HTT_FLOW_POOL_MAP_NUM_FLOWS_GET(*msg_word);
601 
602 		if (((HTT_FLOW_POOL_MAP_PAYLOAD_SZ /
603 			HTT_FLOW_POOL_MAP_HEADER_SZ) * num_flows + 1) * sizeof(*msg_word) > msg_len) {
604 			qdf_print("Invalid num_flows");
605 			WARN_ON(1);
606 			break;
607 		}
608 
609 		msg_word++;
610 		while (num_flows) {
611 			pool_map_payoad = (struct htt_flow_pool_map_payload_t *)
612 								msg_word;
613 			ol_tx_flow_pool_map_handler(pool_map_payoad->flow_id,
614 					pool_map_payoad->flow_type,
615 					pool_map_payoad->flow_pool_id,
616 					pool_map_payoad->flow_pool_size);
617 
618 			msg_word += (HTT_FLOW_POOL_MAP_PAYLOAD_SZ /
619 						 HTT_FLOW_POOL_MAP_HEADER_SZ);
620 			num_flows--;
621 		}
622 		break;
623 	}
624 
625 	case HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP:
626 	{
627 		struct htt_flow_pool_unmap_t *pool_numap_payload;
628 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
629 
630 		if (msg_len < sizeof(struct htt_flow_pool_unmap_t)) {
631 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
632 				  "Invalid msg_word length in HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP");
633 			WARN_ON(1);
634 			break;
635 		}
636 
637 		pool_numap_payload = (struct htt_flow_pool_unmap_t *)msg_word;
638 		ol_tx_flow_pool_unmap_handler(pool_numap_payload->flow_id,
639 					pool_numap_payload->flow_type,
640 					pool_numap_payload->flow_pool_id);
641 		break;
642 	}
643 
644 	case HTT_T2H_MSG_TYPE_FLOW_POOL_RESIZE:
645 	{
646 		struct htt_flow_pool_resize_t *msg;
647 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
648 
649 		if (msg_len < sizeof(struct htt_flow_pool_resize_t)) {
650 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
651 				  "Invalid msg_word length in HTT_T2H_MSG_TYPE_FLOW_POOL_RESIZE");
652 			WARN_ON(1);
653 			break;
654 		}
655 
656 		msg = (struct htt_flow_pool_resize_t *)msg_word;
657 		ol_tx_flow_pool_resize_handler(msg->flow_pool_id,
658 					       msg->flow_pool_new_size);
659 
660 		break;
661 	}
662 
663 	case HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR:
664 	{
665 		switch (HTT_RX_OFLD_PKT_ERR_MSG_SUB_TYPE_GET(*msg_word)) {
666 		case HTT_RX_OFLD_PKT_ERR_TYPE_MIC_ERR:
667 		{
668 			struct ol_txrx_vdev_t *vdev;
669 			struct ol_txrx_peer_t *peer;
670 			uint64_t pn;
671 			uint32_t key_id;
672 			uint16_t peer_id;
673 			int msg_len = qdf_nbuf_len(htt_t2h_msg);
674 
675 			if (msg_len < HTT_RX_OFLD_PKT_ERR_MIC_ERR_BYTES) {
676 				qdf_print("invalid nbuff len");
677 				WARN_ON(1);
678 				break;
679 			}
680 
681 			peer_id = HTT_RX_OFLD_PKT_ERR_MIC_ERR_PEER_ID_GET
682 				(*(msg_word + 1));
683 
684 			peer = ol_txrx_peer_find_by_id(pdev->txrx_pdev,
685 				 peer_id);
686 			if (!peer) {
687 				qdf_print("invalid peer id %d", peer_id);
688 				qdf_assert(0);
689 				break;
690 			}
691 			vdev = peer->vdev;
692 			key_id = HTT_RX_OFLD_PKT_ERR_MIC_ERR_KEYID_GET
693 				(*(msg_word + 1));
694 			qdf_mem_copy(&pn, (uint8_t *)(msg_word + 6), 6);
695 
696 			ol_rx_send_mic_err_ind(vdev->pdev, vdev->vdev_id,
697 					       peer->mac_addr.raw, 0, 0,
698 					       OL_RX_ERR_TKIP_MIC, htt_t2h_msg,
699 					       &pn, key_id);
700 			break;
701 		}
702 		default:
703 		{
704 			qdf_print("unhandled error type %d",
705 			    HTT_RX_OFLD_PKT_ERR_MSG_SUB_TYPE_GET(*msg_word));
706 		break;
707 		}
708 		}
709 	}
710 #ifdef WLAN_CFR_ENABLE
711 	case HTT_T2H_MSG_TYPE_CFR_DUMP_COMPL_IND:
712 	{
713 		int expected_len;
714 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
715 
716 		expected_len = HTT_CFR_DUMP_COMPL_HEAD_SZ +
717 				sizeof(struct htt_cfr_dump_compl_ind);
718 		if (msg_len < expected_len) {
719 			qdf_print("Invalid length of CFR capture event");
720 			break;
721 		}
722 
723 		ol_rx_cfr_capture_msg_handler(htt_t2h_msg);
724 		break;
725 	}
726 #endif
727 	default:
728 		break;
729 	};
730 	/* Free the indication buffer */
731 	if (free_msg_buf)
732 		qdf_nbuf_free(htt_t2h_msg);
733 }
734 
735 #define HTT_TX_COMPL_HEAD_SZ			4
736 #define HTT_TX_COMPL_BYTES_PER_MSDU_ID		2
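
/*
 * Illustrative sketch (not compiled): every TX_COMPL_IND/TX_INSPECT_IND
 * length check below reduces to "the 4-byte HTT msg header plus 2 bytes
 * per MSDU ID must fit inside the received buffer".
 */
#if 0
static bool example_tx_compl_len_ok(int num_msdus, int msg_len)
{
	return (num_msdus * HTT_TX_COMPL_BYTES_PER_MSDU_ID +
		HTT_TX_COMPL_HEAD_SZ) <= msg_len;
}
#endif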
737 
738 /**
739  * Generic target to host msg/event handler for low priority messages.
740  * Low priority messages are handled in a separate handler called from
741  * this function, so that the most likely success path (rx and tx
742  * completion) has a small code footprint.
743  */
744 void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
745 {
746 	struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
747 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
748 	uint32_t *msg_word;
749 	enum htt_t2h_msg_type msg_type;
750 
751 	/* check for successful message reception */
752 	if (pkt->Status != QDF_STATUS_SUCCESS) {
753 		if (pkt->Status != QDF_STATUS_E_CANCELED)
754 			pdev->stats.htc_err_cnt++;
755 		qdf_nbuf_free(htt_t2h_msg);
756 		return;
757 	}
758 #ifdef HTT_RX_RESTORE
759 	if (qdf_unlikely(pdev->rx_ring.rx_reset)) {
760 		qdf_print("rx restore ..\n");
761 		qdf_nbuf_free(htt_t2h_msg);
762 		return;
763 	}
764 #endif
765 
766 	/* confirm alignment */
767 	HTT_ASSERT3((((unsigned long)qdf_nbuf_data(htt_t2h_msg)) & 0x3) == 0);
768 
769 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
770 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
771 
772 #if defined(HELIUMPLUS_DEBUG)
773 	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
774 		  "%s %d: msg_word 0x%x msg_type %d", __func__, __LINE__,
775 		  *msg_word, msg_type);
776 #endif
777 
778 	switch (msg_type) {
779 	case HTT_T2H_MSG_TYPE_RX_IND:
780 	{
781 		unsigned int num_mpdu_ranges;
782 		unsigned int num_msdu_bytes;
783 		unsigned int calculated_msg_len;
784 		unsigned int rx_mpdu_range_offset_bytes;
785 		uint16_t peer_id;
786 		uint8_t tid;
787 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
788 
789 		if (qdf_unlikely(pdev->cfg.is_full_reorder_offload)) {
790 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND not supported ");
791 			qdf_print("with full reorder offload\n");
792 			break;
793 		}
794 		peer_id = HTT_RX_IND_PEER_ID_GET(*msg_word);
795 		tid = HTT_RX_IND_EXT_TID_GET(*msg_word);
796 
797 		if (tid >= OL_TXRX_NUM_EXT_TIDS) {
798 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid tid %d\n",
799 				tid);
800 			break;
801 		}
802 		if (msg_len < (2 + HTT_RX_PPDU_DESC_SIZE32 + 1) * sizeof(uint32_t)) {
803 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid msg_len\n");
804 			break;
805 		}
806 		num_msdu_bytes =
807 			HTT_RX_IND_FW_RX_DESC_BYTES_GET(
808 				*(msg_word + 2 + HTT_RX_PPDU_DESC_SIZE32));
809 		/*
810 		 * 1 word for the message header,
811 		 * HTT_RX_PPDU_DESC_SIZE32 words for the FW rx PPDU desc
812 		 * 1 word to specify the number of MSDU bytes,
813 		 * 1 word for every 4 MSDU bytes (round up),
814 		 * 1 word for the MPDU range header
815 		 */
816 		rx_mpdu_range_offset_bytes =
817 			(HTT_RX_IND_HDR_BYTES + num_msdu_bytes + 3);
818 		if (qdf_unlikely(num_msdu_bytes >
819 				 rx_mpdu_range_offset_bytes)) {
820 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid %s %u\n",
821 				  "num_msdu_bytes",
822 				  num_msdu_bytes);
823 			WARN_ON(1);
824 			break;
825 		}
826 		pdev->rx_mpdu_range_offset_words =
827 			rx_mpdu_range_offset_bytes >> 2;
828 		num_mpdu_ranges =
829 			HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word + 1));
830 		pdev->rx_ind_msdu_byte_idx = 0;
831 		if (qdf_unlikely(rx_mpdu_range_offset_bytes >
832 		    msg_len)) {
833 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid %s %d\n",
834 				  "rx_mpdu_range_offset_words",
835 				  pdev->rx_mpdu_range_offset_words);
836 			WARN_ON(1);
837 			break;
838 		}
839 		calculated_msg_len = rx_mpdu_range_offset_bytes +
840 			(num_mpdu_ranges * (int)sizeof(uint32_t));
841 		/*
842 		 * Check that the addition and multiplication
843 		 * do not cause integer overflow
844 		 */
845 		if (qdf_unlikely(calculated_msg_len <
846 		    rx_mpdu_range_offset_bytes)) {
847 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid %s %u\n",
848 				  "num_mpdu_ranges",
849 				  (num_mpdu_ranges * (int)sizeof(uint32_t)));
850 			WARN_ON(1);
851 			break;
852 		}
853 		if (qdf_unlikely(calculated_msg_len > msg_len)) {
854 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid %s %u\n",
855 				  "offset_words + mpdu_ranges",
856 				  calculated_msg_len);
857 			WARN_ON(1);
858 			break;
859 		}
860 		ol_rx_indication_handler(pdev->txrx_pdev,
861 					 htt_t2h_msg, peer_id,
862 					 tid, num_mpdu_ranges);
863 
864 		if (pdev->cfg.is_high_latency)
865 			return;
866 
867 		break;
868 	}
869 	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
870 	{
871 		int old_credit;
872 		int num_msdus;
873 		enum htt_tx_status status;
874 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
875 
876 		/* status - no enum translation needed */
877 		status = HTT_TX_COMPL_IND_STATUS_GET(*msg_word);
878 		num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
879 
880 		/*
881 		 * Each MSDU desc ID occupies 2 bytes;
882 		 * the 4 accounts for the HTT msg header.
883 		 */
884 		if ((num_msdus * HTT_TX_COMPL_BYTES_PER_MSDU_ID +
885 			HTT_TX_COMPL_HEAD_SZ) > msg_len) {
886 			qdf_print("%s: num_msdus(%d) is invalid,"
887 				"adf_nbuf_len = %d\n",
888 				__FUNCTION__,
889 				num_msdus,
890 				msg_len);
891 			break;
892 		}
893 
894 		if (num_msdus & 0x1) {
895 			struct htt_tx_compl_ind_base *compl =
896 				(void *)msg_word;
897 
898 			/*
899 			 * Host CPU endianness can be different from FW CPU.
900 			 * This can result in even and odd MSDU IDs being
901 			 * switched. If this happens, copy the switched final
902 			 * odd MSDU ID from location payload[size], to
903 			 * location payload[size-1], where the message
904 			 * handler function expects to find it
905 			 */
906 			if (compl->payload[num_msdus] !=
907 			    HTT_TX_COMPL_INV_MSDU_ID) {
908 				compl->payload[num_msdus - 1] =
909 					compl->payload[num_msdus];
910 			}
911 		}
912 
913 		if (pdev->cfg.is_high_latency &&
914 		    !pdev->cfg.credit_update_enabled) {
915 			old_credit = qdf_atomic_read(
916 						&pdev->htt_tx_credit.target_delta);
917 			if (((old_credit + num_msdus) > MAX_TARGET_TX_CREDIT) ||
918 				((old_credit + num_msdus) < -MAX_TARGET_TX_CREDIT)) {
919 				qdf_err("invalid update,old_credit=%d, num_msdus=%d",
920 					old_credit, num_msdus);
921 			} else {
922 				if (!pdev->cfg.default_tx_comp_req) {
923 					int credit_delta;
924 
925 					HTT_TX_MUTEX_ACQUIRE(&pdev->credit_mutex);
926 					qdf_atomic_add(num_msdus,
927 						       &pdev->htt_tx_credit.
928 							target_delta);
929 					credit_delta = htt_tx_credit_update(pdev);
930 					HTT_TX_MUTEX_RELEASE(&pdev->credit_mutex);
931 
932 					if (credit_delta) {
933 						ol_tx_target_credit_update(
934 								pdev->txrx_pdev,
935 								credit_delta);
936 					}
937 				} else {
938 					ol_tx_target_credit_update(pdev->txrx_pdev,
939 								   num_msdus);
940 				}
941 			}
942 		}
943 
944 		ol_tx_completion_handler(pdev->txrx_pdev, num_msdus,
945 					 status, msg_word);
946 		HTT_TX_SCHED(pdev);
947 		break;
948 	}
949 	case HTT_T2H_MSG_TYPE_RX_PN_IND:
950 	{
951 		uint16_t peer_id;
952 		uint8_t tid, pn_ie_cnt, *pn_ie = NULL;
953 		uint16_t seq_num_start, seq_num_end;
954 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
955 
956 		if (msg_len < HTT_RX_PN_IND_BYTES) {
957 			qdf_print("invalid nbuff len");
958 			WARN_ON(1);
959 			break;
960 		}
961 
962 		/*First dword */
963 		peer_id = HTT_RX_PN_IND_PEER_ID_GET(*msg_word);
964 		tid = HTT_RX_PN_IND_EXT_TID_GET(*msg_word);
965 
966 		msg_word++;
967 		/*Second dword */
968 		seq_num_start =
969 			HTT_RX_PN_IND_SEQ_NUM_START_GET(*msg_word);
970 		seq_num_end = HTT_RX_PN_IND_SEQ_NUM_END_GET(*msg_word);
971 		pn_ie_cnt = HTT_RX_PN_IND_PN_IE_CNT_GET(*msg_word);
972 
973 		if (msg_len - HTT_RX_PN_IND_BYTES <
974 		    pn_ie_cnt * sizeof(uint8_t)) {
975 			qdf_print("invalid pn_ie count");
976 			WARN_ON(1);
977 			break;
978 		}
979 
980 		msg_word++;
981 		/*Third dword */
982 		if (pn_ie_cnt)
983 			pn_ie = (uint8_t *) msg_word;
984 
985 		ol_rx_pn_ind_handler(pdev->txrx_pdev, peer_id, tid,
986 				     seq_num_start, seq_num_end,
987 				     pn_ie_cnt, pn_ie);
988 
989 		break;
990 	}
991 	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
992 	{
993 		int num_msdus;
994 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
995 
996 		num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
997 		/*
998 		 * Each MSDU desc ID occupies 2 bytes;
999 		 * the 4 accounts for the HTT msg header.
1000 		 */
1001 		if ((num_msdus * HTT_TX_COMPL_BYTES_PER_MSDU_ID +
1002 			HTT_TX_COMPL_HEAD_SZ) > msg_len) {
1003 			qdf_print("%s: num_msdus(%d) is invalid,"
1004 				"adf_nbuf_len = %d\n",
1005 				__FUNCTION__,
1006 				num_msdus,
1007 				msg_len);
1008 			break;
1009 		}
1010 
1011 		if (num_msdus & 0x1) {
1012 			struct htt_tx_compl_ind_base *compl =
1013 				(void *)msg_word;
1014 
1015 			/*
1016 			 * Host CPU endianness can be different from FW CPU.
1017 			 * This can result in even and odd MSDU IDs being
1018 			 * switched. If this happens, copy the switched final
1019 			 * odd MSDU ID from location payload[size], to
1020 			 * location payload[size-1], where the message handler
1021 			 * function expects to find it
1022 			 */
1023 			if (compl->payload[num_msdus] !=
1024 			    HTT_TX_COMPL_INV_MSDU_ID) {
1025 				compl->payload[num_msdus - 1] =
1026 					compl->payload[num_msdus];
1027 			}
1028 		}
1029 		ol_tx_inspect_handler(pdev->txrx_pdev, num_msdus,
1030 				      msg_word + 1);
1031 		HTT_TX_SCHED(pdev);
1032 		break;
1033 	}
1034 	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND:
1035 	{
1036 		uint16_t peer_id;
1037 		uint8_t tid;
1038 		uint8_t offload_ind, frag_ind;
1039 
1040 		if (qdf_unlikely(!pdev->cfg.is_full_reorder_offload)) {
1041 			qdf_print("full reorder offload is disabled");
1042 			break;
1043 		}
1044 
1045 		if (qdf_unlikely(pdev->cfg.is_high_latency)) {
1046 			qdf_print("full reorder offload is not supported in HL");
1047 			break;
1048 		}
1049 
1050 		peer_id = HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(*msg_word);
1051 		tid = HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(*msg_word);
1052 		offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);
1053 		frag_ind = HTT_RX_IN_ORD_PADDR_IND_FRAG_GET(*msg_word);
1054 
1055 #if defined(HELIUMPLUS_DEBUG)
1056 		qdf_print("peerid %d tid %d offloadind %d fragind %d",
1057 			  peer_id, tid, offload_ind,
1058 			  frag_ind);
1059 #endif
1060 		if (qdf_unlikely(frag_ind)) {
1061 			ol_rx_frag_indication_handler(pdev->txrx_pdev,
1062 						      htt_t2h_msg,
1063 						      peer_id, tid);
1064 			break;
1065 		}
1066 
1067 		ol_rx_in_order_indication_handler(pdev->txrx_pdev,
1068 						  htt_t2h_msg, peer_id,
1069 						  tid, offload_ind);
1070 		break;
1071 	}
1072 
1073 	default:
1074 		htt_t2h_lp_msg_handler(context, htt_t2h_msg, true);
1075 		return;
1076 
1077 	};
1078 
1079 	/* Free the indication buffer */
1080 	qdf_nbuf_free(htt_t2h_msg);
1081 }
1082 
1083 #ifdef WLAN_FEATURE_FASTPATH
1084 #define HTT_T2H_MSG_BUF_REINIT(_buf, dev)				\
1085 	do {								\
1086 		QDF_NBUF_CB_PADDR(_buf) -= (HTC_HEADER_LEN +		\
1087 					HTC_HDR_ALIGNMENT_PADDING);	\
1088 		qdf_nbuf_init_fast((_buf));				\
1089 		qdf_mem_dma_sync_single_for_device(dev,			\
1090 					(QDF_NBUF_CB_PADDR(_buf)),	\
1091 					(skb_end_pointer(_buf) -	\
1092 					(_buf)->data),			\
1093 					PCI_DMA_FROMDEVICE);		\
1094 	} while (0)
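
/*
 * Minimal sketch (illustrative only, not compiled) of how the fastpath
 * handler below pairs the HTC header pull with the re-init macro above,
 * so the same nbuf can be parsed and then reposted to the rx ring.
 */
#if 0
static void example_fastpath_msg_lifecycle(struct htt_pdev_t *pdev,
					   qdf_nbuf_t msg)
{
	/* expose the HTT header for parsing */
	qdf_nbuf_pull_head(msg, HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING);

	/* ... parse and dispatch the HTT message ... */

	/* undo the pull and resync the buffer for the device */
	HTT_T2H_MSG_BUF_REINIT(msg, pdev->osdev);
	qdf_nbuf_set_pktlen(msg, 0);
}
#endif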
1095 
1096 /**
1097  * htt_t2h_msg_handler_fast() - Fastpath specific message handler
1098  * @context: HTT context
1099  * @cmpl_msdus: netbuf completions
1100  * @num_cmpls: number of completions to be handled
1101  *
1102  * Return: None
1103  */
1104 void htt_t2h_msg_handler_fast(void *context, qdf_nbuf_t *cmpl_msdus,
1105 			      uint32_t num_cmpls)
1106 {
1107 	struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
1108 	qdf_nbuf_t htt_t2h_msg;
1109 	uint32_t *msg_word;
1110 	uint32_t i;
1111 	enum htt_t2h_msg_type msg_type;
1112 	uint32_t msg_len;
1113 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
1114 
1115 	for (i = 0; i < num_cmpls; i++) {
1116 		htt_t2h_msg = cmpl_msdus[i];
1117 		msg_len = qdf_nbuf_len(htt_t2h_msg);
1118 
1119 		/*
1120 		 * Move the data pointer to point to HTT header
1121 		 * past the HTC header + HTC header alignment padding
1122 		 */
1123 		qdf_nbuf_pull_head(htt_t2h_msg, HTC_HEADER_LEN +
1124 				   HTC_HDR_ALIGNMENT_PADDING);
1125 
1126 		/* confirm alignment */
1127 		HTT_ASSERT3((((unsigned long) qdf_nbuf_data(htt_t2h_msg)) & 0x3)
1128 			    == 0);
1129 
1130 		msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
1131 		msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
1132 
1133 		switch (msg_type) {
1134 		case HTT_T2H_MSG_TYPE_RX_IND:
1135 		{
1136 			unsigned int num_mpdu_ranges;
1137 			unsigned int num_msdu_bytes;
1138 			unsigned int calculated_msg_len;
1139 			unsigned int rx_mpdu_range_offset_bytes;
1140 			u_int16_t peer_id;
1141 			u_int8_t tid;
1142 			msg_len = qdf_nbuf_len(htt_t2h_msg);
1143 
1144 			peer_id = HTT_RX_IND_PEER_ID_GET(*msg_word);
1145 			tid = HTT_RX_IND_EXT_TID_GET(*msg_word);
1146 			if (tid >= OL_TXRX_NUM_EXT_TIDS) {
1147 				qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid tid %d\n",
1148 					tid);
1149 				WARN_ON(1);
1150 				break;
1151 			}
1152 			num_msdu_bytes =
1153 				HTT_RX_IND_FW_RX_DESC_BYTES_GET(
1154 				*(msg_word + 2 +
1155 				  HTT_RX_PPDU_DESC_SIZE32));
1156 			/*
1157 			 * 1 word for the message header,
1158 			 * HTT_RX_PPDU_DESC_SIZE32 words for the FW
1159 			 * rx PPDU desc.
1160 			 * 1 word to specify the number of MSDU bytes,
1161 			 * 1 word for every 4 MSDU bytes (round up),
1162 			 * 1 word for the MPDU range header
1163 			 */
1164 			rx_mpdu_range_offset_bytes =
1165 				(HTT_RX_IND_HDR_BYTES + num_msdu_bytes + 3);
1166 			if (qdf_unlikely(num_msdu_bytes >
1167 					 rx_mpdu_range_offset_bytes)) {
1168 				qdf_print("HTT_T2H_MSG_TYPE_RX_IND, %s %u\n",
1169 					  "invalid num_msdu_bytes",
1170 					  num_msdu_bytes);
1171 				WARN_ON(1);
1172 				break;
1173 			}
1174 			pdev->rx_mpdu_range_offset_words =
1175 				rx_mpdu_range_offset_bytes >> 2;
1176 			num_mpdu_ranges =
1177 				HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word
1178 								 + 1));
1179 			pdev->rx_ind_msdu_byte_idx = 0;
1180 			if (qdf_unlikely(rx_mpdu_range_offset_bytes >
1181 					 msg_len)) {
1182 				qdf_print("HTT_T2H_MSG_TYPE_RX_IND, %s %d\n",
1183 					  "invalid rx_mpdu_range_offset_words",
1184 					  pdev->rx_mpdu_range_offset_words);
1185 				WARN_ON(1);
1186 				break;
1187 			}
1188 			calculated_msg_len = rx_mpdu_range_offset_bytes +
1189 					     (num_mpdu_ranges *
1190 					     (int)sizeof(uint32_t));
1191 			/*
1192 			 * Check that the addition and multiplication
1193 			 * do not cause integer overflow
1194 			 */
1195 			if (qdf_unlikely(calculated_msg_len <
1196 					 rx_mpdu_range_offset_bytes)) {
1197 				qdf_print("HTT_T2H_MSG_TYPE_RX_IND, %s %u\n",
1198 					  "invalid num_mpdu_ranges",
1199 					  (num_mpdu_ranges *
1200 					   (int)sizeof(uint32_t)));
1201 				WARN_ON(1);
1202 				break;
1203 			}
1204 			if (qdf_unlikely(calculated_msg_len > msg_len)) {
1205 				qdf_print("HTT_T2H_MSG_TYPE_RX_IND, %s %u\n",
1206 					  "invalid offset_words + mpdu_ranges",
1207 					  calculated_msg_len);
1208 				WARN_ON(1);
1209 				break;
1210 			}
1211 			ol_rx_indication_handler(pdev->txrx_pdev, htt_t2h_msg,
1212 						 peer_id, tid, num_mpdu_ranges);
1213 			break;
1214 		}
1215 		case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
1216 		{
1217 			int num_msdus;
1218 			enum htt_tx_status status;
1219 
1220 			/* status - no enum translation needed */
1221 			status = HTT_TX_COMPL_IND_STATUS_GET(*msg_word);
1222 			num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
1223 
1224 			/*
1225 			 * Each MSDU desc ID occupies 2 bytes;
1226 			 * the 4 accounts for the HTT msg header.
1227 			 */
1228 			if ((num_msdus * HTT_TX_COMPL_BYTES_PER_MSDU_ID +
1229 				HTT_TX_COMPL_HEAD_SZ) > msg_len) {
1230 				qdf_print("%s: num_msdus(%d) is invalid,"
1231 					"adf_nbuf_len = %d\n",
1232 					__FUNCTION__,
1233 					num_msdus,
1234 					msg_len);
1235 				break;
1236 			}
1237 
1238 			if (num_msdus & 0x1) {
1239 				struct htt_tx_compl_ind_base *compl =
1240 					(void *)msg_word;
1241 
1242 				/*
1243 				 * Host CPU endianness can be different
1244 				 * from FW CPU. This can result in even
1245 				 * and odd MSDU IDs being switched. If
1246 				 * this happens, copy the switched final
1247 				 * odd MSDU ID from location
1248 				 * payload[size], to location
1249 				 * payload[size-1],where the message
1250 				 * handler function expects to find it
1251 				 */
1252 				if (compl->payload[num_msdus] !=
1253 				    HTT_TX_COMPL_INV_MSDU_ID) {
1254 					compl->payload[num_msdus - 1] =
1255 						compl->payload[num_msdus];
1256 				}
1257 			}
1258 			ol_tx_completion_handler(pdev->txrx_pdev, num_msdus,
1259 						 status, msg_word);
1260 
1261 			break;
1262 		}
1263 		case HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND:
1264 		{
1265 			struct htt_tx_offload_deliver_ind_hdr_t
1266 							*offload_deliver_msg;
1267 			uint8_t vdev_id;
1268 			struct ol_txrx_vdev_t *vdev;
1269 			bool is_pkt_during_roam = false;
1270 			struct ol_txrx_pdev_t *txrx_pdev = pdev->txrx_pdev;
1271 			struct ol_txrx_peer_t *peer;
1272 			uint8_t bssid[QDF_MAC_ADDR_SIZE];
1273 			uint32_t freq = 0;
1274 
1275 			if (!(ucfg_pkt_capture_get_pktcap_mode((void *)soc->psoc) &
1276 			      PKT_CAPTURE_MODE_DATA_ONLY))
1277 				break;
1278 
1279 			offload_deliver_msg =
1280 			(struct htt_tx_offload_deliver_ind_hdr_t *)msg_word;
1281 			is_pkt_during_roam =
1282 			(offload_deliver_msg->reserved_2 ? true : false);
1283 
1284 			if (qdf_unlikely(
1285 				!pdev->cfg.is_full_reorder_offload)) {
1286 				break;
1287 			}
1288 
1289 			/* Check whether FW sent the offload data during roaming */
1290 			if (is_pkt_during_roam) {
1291 				vdev_id = HTT_INVALID_VDEV;
1292 				freq =
1293 				(uint32_t)offload_deliver_msg->reserved_3;
1294 				htt_rx_mon_note_capture_channel(
1295 						pdev, cds_freq_to_chan(freq));
1296 			} else {
1297 				vdev_id = offload_deliver_msg->vdev_id;
1298 				vdev = (struct ol_txrx_vdev_t *)
1299 					ol_txrx_get_vdev_from_vdev_id(vdev_id);
1300 
1301 				if (vdev) {
1302 					qdf_spin_lock_bh(
1303 						&txrx_pdev->peer_ref_mutex);
1304 					peer = TAILQ_FIRST(&vdev->peer_list);
1305 					qdf_spin_unlock_bh(
1306 						&txrx_pdev->peer_ref_mutex);
1307 					if (peer) {
1308 						qdf_spin_lock_bh(
1309 							&peer->peer_info_lock);
1310 						qdf_mem_copy(
1311 							bssid,
1312 							&peer->mac_addr.raw,
1313 							QDF_MAC_ADDR_SIZE);
1314 						qdf_spin_unlock_bh(
1315 							&peer->peer_info_lock);
1316 					} else {
1317 						break;
1318 					}
1319 				} else {
1320 					break;
1321 				}
1322 			}
1323 			ucfg_pkt_capture_offload_deliver_indication_handler(
1324 							msg_word,
1325 							vdev_id, bssid, pdev);
1326 			break;
1327 		}
1328 		case HTT_T2H_MSG_TYPE_RX_PN_IND:
1329 		{
1330 			u_int16_t peer_id;
1331 			u_int8_t tid, pn_ie_cnt, *pn_ie = NULL;
1332 			int seq_num_start, seq_num_end;
1333 			int msg_len = qdf_nbuf_len(htt_t2h_msg);
1334 
1335 			if (msg_len < HTT_RX_PN_IND_BYTES) {
1336 				qdf_print("invalid nbuff len");
1337 				WARN_ON(1);
1338 				break;
1339 			}
1340 
1341 			/*First dword */
1342 			peer_id = HTT_RX_PN_IND_PEER_ID_GET(*msg_word);
1343 			tid = HTT_RX_PN_IND_EXT_TID_GET(*msg_word);
1344 
1345 			msg_word++;
1346 			/*Second dword */
1347 			seq_num_start =
1348 				HTT_RX_PN_IND_SEQ_NUM_START_GET(*msg_word);
1349 			seq_num_end =
1350 				HTT_RX_PN_IND_SEQ_NUM_END_GET(*msg_word);
1351 			pn_ie_cnt =
1352 				HTT_RX_PN_IND_PN_IE_CNT_GET(*msg_word);
1353 
1354 			if (msg_len - HTT_RX_PN_IND_BYTES <
1355 				pn_ie_cnt * sizeof(uint8_t)) {
1356 				qdf_print("invalid pn_ie len");
1357 				WARN_ON(1);
1358 				break;
1359 			}
1360 
1361 			msg_word++;
1362 			/*Third dword*/
1363 			if (pn_ie_cnt)
1364 				pn_ie = (u_int8_t *)msg_word;
1365 
1366 			ol_rx_pn_ind_handler(pdev->txrx_pdev, peer_id, tid,
1367 				seq_num_start, seq_num_end, pn_ie_cnt, pn_ie);
1368 
1369 			break;
1370 		}
1371 		case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
1372 		{
1373 			int num_msdus;
1374 
1375 			num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
1376 			/*
1377 			 * Each MSDU desc ID occupies 2 bytes;
1378 			 * the 4 accounts for the HTT msg header.
1379 			 */
1380 			if ((num_msdus * HTT_TX_COMPL_BYTES_PER_MSDU_ID +
1381 				HTT_TX_COMPL_HEAD_SZ) > msg_len) {
1382 				qdf_print("%s: num_msdus(%d) is invalid,"
1383 					"adf_nbuf_len = %d\n",
1384 					__FUNCTION__,
1385 					num_msdus,
1386 					msg_len);
1387 				break;
1388 			}
1389 
1390 			if (num_msdus & 0x1) {
1391 				struct htt_tx_compl_ind_base *compl =
1392 					(void *)msg_word;
1393 
1394 				/*
1395 				 * Host CPU endianness can be different
1396 				 * from FW CPU. This can result in
1397 				 * even and odd MSDU IDs being switched.
1398 				 * If this happens, copy the switched
1399 				 * final odd MSDU ID from location
1400 				 * payload[size], to location
1401 				 * payload[size-1], where the message
1402 				 * handler function expects to find it
1403 				 */
1404 				if (compl->payload[num_msdus] !=
1405 				    HTT_TX_COMPL_INV_MSDU_ID) {
1406 					compl->payload[num_msdus - 1] =
1407 					compl->payload[num_msdus];
1408 				}
1409 			}
1410 			ol_tx_inspect_handler(pdev->txrx_pdev,
1411 					      num_msdus, msg_word + 1);
1412 			break;
1413 		}
1414 		case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND:
1415 		{
1416 			u_int16_t peer_id;
1417 			u_int8_t tid;
1418 			u_int8_t offload_ind, frag_ind;
1419 
1420 			if (qdf_unlikely(
1421 				  !pdev->cfg.is_full_reorder_offload)) {
1422 				qdf_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND not supported when full reorder offload is disabled\n");
1423 				break;
1424 			}
1425 
1426 			if (qdf_unlikely(
1427 				pdev->txrx_pdev->cfg.is_high_latency)) {
1428 				qdf_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND not supported on high latency\n");
1429 				break;
1430 			}
1431 
1432 			peer_id = HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(
1433 							*msg_word);
1434 			tid = HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(
1435 							*msg_word);
1436 			offload_ind =
1437 				HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(
1438 							*msg_word);
1439 			frag_ind = HTT_RX_IN_ORD_PADDR_IND_FRAG_GET(
1440 							*msg_word);
1441 
1442 			if (qdf_unlikely(frag_ind)) {
1443 				ol_rx_frag_indication_handler(
1444 				pdev->txrx_pdev, htt_t2h_msg, peer_id,
1445 				tid);
1446 				break;
1447 			}
1448 
1449 			ol_rx_in_order_indication_handler(
1450 					pdev->txrx_pdev, htt_t2h_msg,
1451 					peer_id, tid, offload_ind);
1452 			break;
1453 		}
1454 		default:
1455 			htt_t2h_lp_msg_handler(context, htt_t2h_msg, false);
1456 			break;
1457 		};
1458 
1459 		/* Re-initialize the indication buffer */
1460 		HTT_T2H_MSG_BUF_REINIT(htt_t2h_msg, pdev->osdev);
1461 		qdf_nbuf_set_pktlen(htt_t2h_msg, 0);
1462 	}
1463 }
1464 #endif /* WLAN_FEATURE_FASTPATH */
1465 
1466 /*--- target->host HTT message Info Element access methods ------------------*/
1467 
1468 /*--- tx completion message ---*/
1469 
1470 uint16_t htt_tx_compl_desc_id(void *iterator, int num)
1471 {
1472 	/*
1473 	 * The MSDU IDs are packed, 2 per 32-bit word.
1474 	 * Iterate on them as an array of 16-bit elements.
1475 	 * This will work fine if the host endianness matches
1476 	 * the target endianness.
1477 	 * If the host endianness is opposite of the target's,
1478 	 * this iterator will produce descriptor IDs in a different
1479 	 * order than the target inserted them into the message -
1480 	 * if the target puts in [0, 1, 2, 3, ...] the host will
1481 	 * put out [1, 0, 3, 2, ...].
1482 	 * This is fine, except for the last ID if there are an
1483 	 * odd number of IDs.  But the TX_COMPL_IND handling code
1484 	 * in the htt_t2h_msg_handler already added a duplicate
1485 	 * of the final ID, if there were an odd number of IDs,
1486 	 * so this function can safely treat the IDs as an array
1487 	 * of 16-bit elements.
1488 	 */
1489 	return *(((uint16_t *) iterator) + num);
1490 }
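
/*
 * Worked example (illustrative only, not compiled): with MSDU IDs
 * [10, 11, 12] the target packs two 16-bit IDs per word and pads the
 * final half-word with HTT_TX_COMPL_INV_MSDU_ID.  An opposite-endian
 * host reads the array as [11, 10, INV, 12]; the fix-up performed in
 * the TX_COMPL_IND/TX_INSPECT_IND handlers copies the trailing valid ID
 * over the padding slot so that entries 0..num_msdus-1 are all valid.
 */
#if 0
static void example_odd_msdu_id_fixup(uint16_t *payload, int num_msdus)
{
	if ((num_msdus & 0x1) &&
	    payload[num_msdus] != HTT_TX_COMPL_INV_MSDU_ID)
		payload[num_msdus - 1] = payload[num_msdus];
}
#endif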
1491 
1492 /*--- rx indication message ---*/
1493 
1494 int htt_rx_ind_flush(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
1495 {
1496 	uint32_t *msg_word;
1497 
1498 	msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
1499 	return HTT_RX_IND_FLUSH_VALID_GET(*msg_word);
1500 }
1501 
1502 void
1503 htt_rx_ind_flush_seq_num_range(htt_pdev_handle pdev,
1504 			       qdf_nbuf_t rx_ind_msg,
1505 			       unsigned int *seq_num_start,
1506 			       unsigned int *seq_num_end)
1507 {
1508 	uint32_t *msg_word;
1509 
1510 	msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
1511 	msg_word++;
1512 	*seq_num_start = HTT_RX_IND_FLUSH_SEQ_NUM_START_GET(*msg_word);
1513 	*seq_num_end = HTT_RX_IND_FLUSH_SEQ_NUM_END_GET(*msg_word);
1514 }
1515 
1516 int htt_rx_ind_release(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
1517 {
1518 	uint32_t *msg_word;
1519 
1520 	msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
1521 	return HTT_RX_IND_REL_VALID_GET(*msg_word);
1522 }
1523 
1524 void
1525 htt_rx_ind_release_seq_num_range(htt_pdev_handle pdev,
1526 				 qdf_nbuf_t rx_ind_msg,
1527 				 unsigned int *seq_num_start,
1528 				 unsigned int *seq_num_end)
1529 {
1530 	uint32_t *msg_word;
1531 
1532 	msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
1533 	msg_word++;
1534 	*seq_num_start = HTT_RX_IND_REL_SEQ_NUM_START_GET(*msg_word);
1535 	*seq_num_end = HTT_RX_IND_REL_SEQ_NUM_END_GET(*msg_word);
1536 }
1537 
1538 void
1539 htt_rx_ind_mpdu_range_info(struct htt_pdev_t *pdev,
1540 			   qdf_nbuf_t rx_ind_msg,
1541 			   int mpdu_range_num,
1542 			   enum htt_rx_status *status, int *mpdu_count)
1543 {
1544 	uint32_t *msg_word;
1545 
1546 	msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
1547 	msg_word += pdev->rx_mpdu_range_offset_words + mpdu_range_num;
1548 	*status = HTT_RX_IND_MPDU_STATUS_GET(*msg_word);
1549 	*mpdu_count = HTT_RX_IND_MPDU_COUNT_GET(*msg_word);
1550 }
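
/*
 * Minimal usage sketch (illustrative only, not compiled), assuming the
 * caller has already extracted num_mpdu_ranges from the rx indication
 * header: walk the MPDU ranges with the accessor above.
 */
#if 0
static void example_walk_mpdu_ranges(struct htt_pdev_t *pdev,
				     qdf_nbuf_t rx_ind_msg,
				     int num_mpdu_ranges)
{
	int i;

	for (i = 0; i < num_mpdu_ranges; i++) {
		enum htt_rx_status status;
		int mpdu_count;

		htt_rx_ind_mpdu_range_info(pdev, rx_ind_msg, i,
					   &status, &mpdu_count);
		/* a real consumer would now pop mpdu_count MPDUs with
		 * this status from the rx ring
		 */
	}
}
#endif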
1551 
1552 /**
1553  * htt_rx_ind_rssi_dbm() - Return the RSSI provided in a rx indication message.
1554  *
1555  * @pdev:       the HTT instance the rx data was received on
1556  * @rx_ind_msg: the netbuf containing the rx indication message
1557  *
1558  * Return the RSSI from an rx indication message, in dBm units.
1559  *
1560  * Return: RSSI in dBm, or HTT_INVALID_RSSI
1561  */
1562 int16_t htt_rx_ind_rssi_dbm(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
1563 {
1564 	int8_t rssi;
1565 	uint32_t *msg_word;
1566 
1567 	msg_word = (uint32_t *)
1568 		   (qdf_nbuf_data(rx_ind_msg) +
1569 		    HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
1570 
1571 	/* check if the RX_IND message contains valid rx PPDU start info */
1572 	if (!HTT_RX_IND_START_VALID_GET(*msg_word))
1573 		return HTT_RSSI_INVALID;
1574 
1575 	rssi = HTT_RX_IND_RSSI_CMB_GET(*msg_word);
1576 	return (HTT_TGT_RSSI_INVALID == rssi) ?
1577 	       HTT_RSSI_INVALID : rssi;
1578 }
1579 
1580 /**
1581  * htt_rx_ind_rssi_dbm_chain() - Return the RSSI for a chain provided in a rx
1582  *              indication message.
1583  * @pdev:       the HTT instance the rx data was received on
1584  * @rx_ind_msg: the netbuf containing the rx indication message
1585  * @chain:      the index of the chain (0-3)
1586  *
1587  * Return the RSSI for a chain from an rx indication message, in dBm units.
1588  *
1589  * Return: RSSI, or HTT_INVALID_RSSI
1590  */
1591 int16_t
1592 htt_rx_ind_rssi_dbm_chain(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg,
1593 		      int8_t chain)
1594 {
1595 	int8_t rssi;
1596 	uint32_t *msg_word;
1597 
1598 	if (chain < 0 || chain > 3)
1599 		return HTT_RSSI_INVALID;
1600 
1601 	msg_word = (uint32_t *)
1602 		(qdf_nbuf_data(rx_ind_msg) +
1603 		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
1604 
1605 	/* check if the RX_IND message contains valid rx PPDU start info */
1606 	if (!HTT_RX_IND_START_VALID_GET(*msg_word))
1607 		return HTT_RSSI_INVALID;
1608 
1609 	msg_word += 1 + chain;
1610 
1611 	rssi = HTT_RX_IND_RSSI_PRI20_GET(*msg_word);
1612 	return (HTT_TGT_RSSI_INVALID == rssi) ?
1613 		HTT_RSSI_INVALID :
1614 		rssi;
1615 }
1616 
1617 /**
1618  * htt_rx_ind_legacy_rate() - Return the data rate
1619  * @pdev:        the HTT instance the rx data was received on
1620  * @rx_ind_msg:  the netbuf containing the rx indication message
1621  * @legacy_rate: (output) the data rate
1622  *      The legacy_rate parameter's value depends on the
1623  *      legacy_rate_sel value.
1624  *      If legacy_rate_sel is 0:
1625  *              0x8: OFDM 48 Mbps
1626  *              0x9: OFDM 24 Mbps
1627  *              0xA: OFDM 12 Mbps
1628  *              0xB: OFDM 6 Mbps
1629  *              0xC: OFDM 54 Mbps
1630  *              0xD: OFDM 36 Mbps
1631  *              0xE: OFDM 18 Mbps
1632  *              0xF: OFDM 9 Mbps
1633  *      If legacy_rate_sel is 1:
1634  *              0x8: CCK 11 Mbps long preamble
1635  *              0x9: CCK 5.5 Mbps long preamble
1636  *              0xA: CCK 2 Mbps long preamble
1637  *              0xB: CCK 1 Mbps long preamble
1638  *              0xC: CCK 11 Mbps short preamble
1639  *              0xD: CCK 5.5 Mbps short preamble
1640  *              0xE: CCK 2 Mbps short preamble
1641  *      -1 on error.
1642  * @legacy_rate_sel: (output) 0 to indicate OFDM, 1 to indicate CCK.
1643  *      -1 on error.
1644  *
1645  * Return the data rate provided in a rx indication message.
1646  */
1647 void
1648 htt_rx_ind_legacy_rate(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg,
1649 		       uint8_t *legacy_rate, uint8_t *legacy_rate_sel)
1650 {
1651 	uint32_t *msg_word;
1652 
1653 	msg_word = (uint32_t *)
1654 		(qdf_nbuf_data(rx_ind_msg) +
1655 		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
1656 
1657 	/* check if the RX_IND message contains valid rx PPDU start info */
1658 	if (!HTT_RX_IND_START_VALID_GET(*msg_word)) {
1659 		*legacy_rate = -1;
1660 		*legacy_rate_sel = -1;
1661 		return;
1662 	}
1663 
1664 	*legacy_rate = HTT_RX_IND_LEGACY_RATE_GET(*msg_word);
1665 	*legacy_rate_sel = HTT_RX_IND_LEGACY_RATE_SEL_GET(*msg_word);
1666 }
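
/*
 * Illustrative helper (not compiled, not part of the driver API):
 * decode the (legacy_rate_sel, legacy_rate) pair documented above into
 * a data rate in units of 100 kbps, returning 0 for unknown codes.
 */
#if 0
static uint16_t example_legacy_rate_100kbps(uint8_t sel, uint8_t rate)
{
	/* indexed by rate code 0x8..0xF, per the table above */
	static const uint16_t ofdm[] = {
		480, 240, 120, 60, 540, 360, 180, 90 };
	static const uint16_t cck[] = {
		110, 55, 20, 10, 110, 55, 20, 0 };

	if (rate < 0x8 || rate > 0xF)
		return 0;
	return sel ? cck[rate - 0x8] : ofdm[rate - 0x8];
}
#endif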
1667 
1668 /**
1669  * htt_rx_ind_timestamp() - Return the timestamp
1670  * @pdev:                  the HTT instance the rx data was received on
1671  * @rx_ind_msg:            the netbuf containing the rx indication message
1672  * @timestamp_microsec:    (output) the timestamp to microsecond resolution.
1673  *                         -1 on error.
1674  * @timestamp_submicrosec: the submicrosecond portion of the
1675  *                         timestamp. -1 on error.
1676  *
1677  * Return the timestamp provided in a rx indication message.
1678  */
1679 void
1680 htt_rx_ind_timestamp(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg,
1681 		     uint32_t *timestamp_microsec,
1682 		     uint8_t *timestamp_submicrosec)
1683 {
1684 	uint32_t *msg_word;
1685 
1686 	msg_word = (uint32_t *)
1687 		(qdf_nbuf_data(rx_ind_msg) +
1688 		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
1689 
1690 	/* check if the RX_IND message contains valid rx PPDU end info */
1691 	if (!HTT_RX_IND_END_VALID_GET(*msg_word)) {
1692 		*timestamp_microsec = -1;
1693 		*timestamp_submicrosec = -1;
1694 		return;
1695 	}
1696 
1697 	*timestamp_microsec = *(msg_word + 6);
1698 	*timestamp_submicrosec =
1699 		HTT_RX_IND_TIMESTAMP_SUBMICROSEC_GET(*msg_word);
1700 }
1701 
1702 #define INVALID_TSF -1
1703 /**
1704  * htt_rx_ind_tsf32() - Return the TSF timestamp
1705  * @pdev:       the HTT instance the rx data was received on
1706  * @rx_ind_msg: the netbuf containing the rx indication message
1707  *
1708  * Return the TSF timestamp provided in a rx indication message.
1709  *
1710  * Return: TSF timestamp
1711  */
1712 uint32_t
1713 htt_rx_ind_tsf32(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
1714 {
1715 	uint32_t *msg_word;
1716 
1717 	msg_word = (uint32_t *)
1718 		(qdf_nbuf_data(rx_ind_msg) +
1719 		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
1720 
1721 	/* check if the RX_IND message contains valid rx PPDU end info */
1722 	if (!HTT_RX_IND_END_VALID_GET(*msg_word))
1723 		return INVALID_TSF;
1724 
1725 	return *(msg_word + 5);
1726 }
1727 
1728 /**
1729  * htt_rx_ind_ext_tid() - Return the extended traffic ID provided in a rx
1730  *			  indication message.
1731  * @pdev:       the HTT instance the rx data was received on
1732  * @rx_ind_msg: the netbuf containing the rx indication message
1733  *
1734  * Return the extended traffic ID in a rx indication message.
1735  *
1736  * Return: Extended TID
1737  */
1738 uint8_t
1739 htt_rx_ind_ext_tid(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
1740 {
1741 	uint32_t *msg_word;
1742 
1743 	msg_word = (uint32_t *)
1744 		(qdf_nbuf_data(rx_ind_msg));
1745 
1746 	return HTT_RX_IND_EXT_TID_GET(*msg_word);
1747 }
1748 
1749 /*--- stats confirmation message ---*/
1750 
1751 void
1752 htt_t2h_dbg_stats_hdr_parse(uint8_t *stats_info_list,
1753 			    enum htt_dbg_stats_type *type,
1754 			    enum htt_dbg_stats_status *status,
1755 			    int *length, uint8_t **stats_data)
1756 {
1757 	uint32_t *msg_word = (uint32_t *) stats_info_list;
1758 	*type = HTT_T2H_STATS_CONF_TLV_TYPE_GET(*msg_word);
1759 	*status = HTT_T2H_STATS_CONF_TLV_STATUS_GET(*msg_word);
1760 	*length = HTT_T2H_STATS_CONF_TLV_HDR_SIZE +     /* header length */
1761 		HTT_T2H_STATS_CONF_TLV_LENGTH_GET(*msg_word); /* data len */
1762 	*stats_data = stats_info_list + HTT_T2H_STATS_CONF_TLV_HDR_SIZE;
1763 }
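
/*
 * Minimal usage sketch (illustrative only, not compiled), assuming the
 * caller knows the total length of the stats TLV series: walk the TLVs
 * with the parser above, advancing by each TLV's reported length (which
 * already includes the TLV header).
 */
#if 0
static void example_walk_stats_tlvs(uint8_t *stats_info_list, int total_len)
{
	int consumed = 0;

	while (consumed < total_len) {
		enum htt_dbg_stats_type type;
		enum htt_dbg_stats_status status;
		int length;
		uint8_t *stats_data;

		htt_t2h_dbg_stats_hdr_parse(stats_info_list + consumed,
					    &type, &status, &length,
					    &stats_data);
		/* handle (type, status, stats_data, length) here */
		consumed += length;
	}
}
#endif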
1764 
1765 void
1766 htt_rx_frag_ind_flush_seq_num_range(htt_pdev_handle pdev,
1767 				    qdf_nbuf_t rx_frag_ind_msg,
1768 				    uint16_t *seq_num_start, uint16_t *seq_num_end)
1769 {
1770 	uint32_t *msg_word;
1771 
1772 	msg_word = (uint32_t *) qdf_nbuf_data(rx_frag_ind_msg);
1773 	msg_word++;
1774 	*seq_num_start = HTT_RX_FRAG_IND_FLUSH_SEQ_NUM_START_GET(*msg_word);
1775 	*seq_num_end = HTT_RX_FRAG_IND_FLUSH_SEQ_NUM_END_GET(*msg_word);
1776 }
1777