xref: /wlan-dirver/qcacld-3.0/core/dp/htt/htt_t2h.c (revision 2933838e5c361d2ceff0278c7bf3d087c9189fff)
1 /*
2  * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * @file htt_t2h.c
22  * @brief Provide functions to process target->host HTT messages.
23  * @details
24  *  This file contains functions related to target->host HTT messages.
25  *  There are two categories of functions:
26  *  1.  A function that receives an HTT message from HTC and dispatches it
27  *      based on the HTT message type.
28  *  2.  Functions that provide the info elements from specific HTT messages.
29  */
30 #include <wma.h>
31 #include <htc_api.h>            /* HTC_PACKET */
32 #include <htt.h>                /* HTT_T2H_MSG_TYPE, etc. */
33 #include <qdf_nbuf.h>           /* qdf_nbuf_t */
34 
35 #include <ol_rx.h>
36 #include <ol_htt_rx_api.h>
37 #include <ol_htt_tx_api.h>
38 #include <ol_txrx_htt_api.h>    /* htt_tx_status */
39 
40 #include <htt_internal.h>       /* HTT_TX_SCHED, etc. */
41 #include <pktlog_ac_fmt.h>
42 #include <wdi_event.h>
44 #include <ol_txrx_peer_find.h>
45 #include <cdp_txrx_ipa.h>
46 #include "pktlog_ac.h"
47 #include <cdp_txrx_handle.h>
48 #include <wlan_pkt_capture_ucfg_api.h>
49 #include <ol_txrx.h>
50 /*--- target->host HTT message dispatch function ----------------------------*/
51 
52 #ifndef DEBUG_CREDIT
53 #define DEBUG_CREDIT 0
54 #endif
55 
56 #if defined(CONFIG_HL_SUPPORT)
57 
60 /**
61  * htt_rx_frag_set_last_msdu() - set last msdu bit in rx descriptor
62  *				 for received frames
63  * @pdev: Handle (pointer) to HTT pdev.
64  * @msg: htt received msg
65  *
66  * Return: None
67  */
68 static inline
69 void htt_rx_frag_set_last_msdu(struct htt_pdev_t *pdev, qdf_nbuf_t msg)
70 {
71 }
72 #else
73 
74 static void htt_rx_frag_set_last_msdu(struct htt_pdev_t *pdev, qdf_nbuf_t msg)
75 {
76 	uint32_t *msg_word;
77 	unsigned int num_msdu_bytes;
78 	qdf_nbuf_t msdu;
79 	struct htt_host_rx_desc_base *rx_desc;
80 	int start_idx;
81 	uint8_t *p_fw_msdu_rx_desc = 0;
82 
83 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
84 	num_msdu_bytes = HTT_RX_FRAG_IND_FW_RX_DESC_BYTES_GET(
85 		*(msg_word + HTT_RX_FRAG_IND_HDR_PREFIX_SIZE32));
86 	/*
87 	 * 1 word for the message header,
88 	 * 1 word to specify the number of MSDU bytes,
89 	 * 1 word for every 4 MSDU bytes (round up),
90 	 * 1 word for the MPDU range header
91 	 */
92 	pdev->rx_mpdu_range_offset_words = 3 + ((num_msdu_bytes + 3) >> 2);
93 	pdev->rx_ind_msdu_byte_idx = 0;
94 
95 	p_fw_msdu_rx_desc = ((uint8_t *) (msg_word) +
96 			     HTT_ENDIAN_BYTE_IDX_SWAP
97 				     (HTT_RX_FRAG_IND_FW_DESC_BYTE_OFFSET));
98 
99 	/*
100 	 * Fix for EV126710, in which a BSOD occurs because the last_msdu bit
101 	 * is not set while the next pointer is deliberately set to NULL
102 	 * before calling ol_rx_pn_check_base().
103 	 *
104 	 * For fragment frames, the HW may not have set the last_msdu bit
105 	 * in the rx descriptor, but the SW expects this flag to be set,
106 	 * since each fragment is in a separate MPDU. Thus, set the flag here,
107 	 * just in case the HW didn't.
108 	 */
109 	start_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
110 	msdu = pdev->rx_ring.buf.netbufs_ring[start_idx];
111 	qdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
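	/*
	 * Temporarily unmap the buffer so the CPU can update the FW rx
	 * descriptor (set last_msdu) before mapping it back for DMA.
	 */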
112 	qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
113 	rx_desc = htt_rx_desc(msdu);
114 	*((uint8_t *) &rx_desc->fw_desc.u.val) = *p_fw_msdu_rx_desc;
115 	rx_desc->msdu_end.last_msdu = 1;
116 	qdf_nbuf_map(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
117 }
118 #endif
119 
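/*
 * Return the MAC address bytes in host order, undoing the per-dword byte
 * swap that the target->host upload applies on big-endian hosts.
 */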
120 static uint8_t *htt_t2h_mac_addr_deswizzle(uint8_t *tgt_mac_addr,
121 					   uint8_t *buffer)
122 {
123 #ifdef BIG_ENDIAN_HOST
124 	/*
125 	 * The host endianness is opposite of the target endianness.
126 	 * To make uint32_t elements come out correctly, the target->host
127 	 * upload has swizzled the bytes in each uint32_t element of the
128 	 * message.
129 	 * For byte-array message fields like the MAC address, this
130 	 * upload swizzling puts the bytes in the wrong order, and needs
131 	 * to be undone.
132 	 */
133 	buffer[0] = tgt_mac_addr[3];
134 	buffer[1] = tgt_mac_addr[2];
135 	buffer[2] = tgt_mac_addr[1];
136 	buffer[3] = tgt_mac_addr[0];
137 	buffer[4] = tgt_mac_addr[7];
138 	buffer[5] = tgt_mac_addr[6];
139 	return buffer;
140 #else
141 	/*
142 	 * The host endianness matches the target endianness -
143 	 * we can use the mac addr directly from the message buffer.
144 	 */
145 	return tgt_mac_addr;
146 #endif
147 }
148 
149 /**
150  * htt_ipa_op_response() - handle a WDI IPA op response message from FW
151  * @pdev: Handle (pointer) to HTT pdev.
152  * @msg_word: pointer to the HTT message payload
153  *
154  * Return: None
155  */
156 #ifdef IPA_OFFLOAD
157 static void htt_ipa_op_response(struct htt_pdev_t *pdev, uint32_t *msg_word)
158 {
159 	uint8_t op_code;
160 	uint16_t len;
161 	uint8_t *op_msg_buffer;
162 	uint8_t *msg_start_ptr;
163 
164 	htc_pm_runtime_put(pdev->htc_pdev);
165 	msg_start_ptr = (uint8_t *) msg_word;
166 	op_code =
167 		HTT_WDI_IPA_OP_RESPONSE_OP_CODE_GET(*msg_word);
168 	msg_word++;
169 	len = HTT_WDI_IPA_OP_RESPONSE_RSP_LEN_GET(*msg_word);
170 
171 	op_msg_buffer =
172 		qdf_mem_malloc(sizeof
173 				(struct htt_wdi_ipa_op_response_t) +
174 				len);
175 	if (!op_msg_buffer)
176 		return;
177 
178 	qdf_mem_copy(op_msg_buffer,
179 			msg_start_ptr,
180 			sizeof(struct htt_wdi_ipa_op_response_t) +
181 			len);
182 	cdp_ipa_op_response(cds_get_context(QDF_MODULE_ID_SOC),
183 			    OL_TXRX_PDEV_ID, op_msg_buffer);
184 }
185 #else
186 static void htt_ipa_op_response(struct htt_pdev_t *pdev, uint32_t *msg_word)
187 {
188 }
189 #endif
190 
191 #ifndef QCN7605_SUPPORT
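/*
 * For high-latency targets that do not request tx completions, fold the
 * reported credit delta into the locally tracked target delta and return
 * the adjusted value; otherwise return the delta unchanged.
 */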
192 static int htt_t2h_adjust_bus_target_delta(struct htt_pdev_t *pdev,
193 					   int32_t htt_credit_delta)
194 {
195 	if (pdev->cfg.is_high_latency && !pdev->cfg.default_tx_comp_req) {
196 		HTT_TX_MUTEX_ACQUIRE(&pdev->credit_mutex);
197 		qdf_atomic_add(htt_credit_delta,
198 			       &pdev->htt_tx_credit.target_delta);
199 		htt_credit_delta = htt_tx_credit_update(pdev);
200 		HTT_TX_MUTEX_RELEASE(&pdev->credit_mutex);
201 	}
202 	return htt_credit_delta;
203 }
204 #else
205 static int htt_t2h_adjust_bus_target_delta(struct htt_pdev_t *pdev,
206 					   int32_t htt_credit_delta)
207 {
208 	return htt_credit_delta;
209 }
210 #endif
211 
212 #define MAX_TARGET_TX_CREDIT    204800
213 #define HTT_CFR_DUMP_COMPL_HEAD_SZ	4
214 
215 /* Target to host msg/event handler for low priority messages */
216 static void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
217 				   bool free_msg_buf)
218 {
219 	struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
220 	uint32_t *msg_word;
221 	enum htt_t2h_msg_type msg_type;
222 
223 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
224 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
225 	switch (msg_type) {
226 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
227 	{
228 		if (htc_dec_return_htt_runtime_cnt(pdev->htc_pdev) >= 0)
229 			htc_pm_runtime_put(pdev->htc_pdev);
230 
231 		pdev->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
232 		pdev->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
233 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
234 			  "target uses HTT version %d.%d; host uses %d.%d",
235 			  pdev->tgt_ver.major, pdev->tgt_ver.minor,
236 			  HTT_CURRENT_VERSION_MAJOR,
237 			  HTT_CURRENT_VERSION_MINOR);
238 		if (pdev->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR)
239 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_WARN,
240 				  "*** Incompatible host/target HTT versions!");
241 		/* abort if the target is incompatible with the host */
242 		qdf_assert(pdev->tgt_ver.major ==
243 			   HTT_CURRENT_VERSION_MAJOR);
244 		if (pdev->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
245 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
246 				  "*** Warning: host/target HTT versions are ");
247 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
248 				  "different, though compatible!");
249 		}
250 		break;
251 	}
252 	case HTT_T2H_MSG_TYPE_RX_FLUSH:
253 	{
254 		uint16_t peer_id;
255 		uint8_t tid;
256 		uint16_t seq_num_start, seq_num_end;
257 		enum htt_rx_flush_action action;
258 
259 		if (qdf_nbuf_len(htt_t2h_msg) < HTT_RX_FLUSH_BYTES) {
260 			qdf_print("invalid nbuf len");
261 			WARN_ON(1);
262 			break;
263 		}
264 
265 		peer_id = HTT_RX_FLUSH_PEER_ID_GET(*msg_word);
266 		tid = HTT_RX_FLUSH_TID_GET(*msg_word);
267 		seq_num_start =
268 			HTT_RX_FLUSH_SEQ_NUM_START_GET(*(msg_word + 1));
269 		seq_num_end =
270 			HTT_RX_FLUSH_SEQ_NUM_END_GET(*(msg_word + 1));
271 		action =
272 			HTT_RX_FLUSH_MPDU_STATUS_GET(*(msg_word + 1)) ==
273 			1 ? htt_rx_flush_release : htt_rx_flush_discard;
274 		ol_rx_flush_handler(pdev->txrx_pdev, peer_id, tid,
275 				    seq_num_start, seq_num_end, action);
276 		break;
277 	}
278 	case HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND:
279 	{
280 		uint16_t msdu_cnt;
281 
282 		if (!pdev->cfg.is_high_latency &&
283 		    pdev->cfg.is_full_reorder_offload) {
284 			qdf_print("HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND not supported when full reorder offload is enabled in the configuration");
287 			break;
288 		}
289 		msdu_cnt =
290 			HTT_RX_OFFLOAD_DELIVER_IND_MSDU_CNT_GET(*msg_word);
291 		ol_rx_offload_deliver_ind_handler(pdev->txrx_pdev,
292 						  htt_t2h_msg,
293 						  msdu_cnt);
294 		if (pdev->cfg.is_high_latency) {
295 			/*
296 			 * return here for HL to avoid double free on
297 			 * htt_t2h_msg
298 			 */
299 			return;
300 		}
301 		break;
302 	}
303 	case HTT_T2H_MSG_TYPE_RX_FRAG_IND:
304 	{
305 		uint16_t peer_id;
306 		uint8_t tid;
307 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
308 
309 		if (msg_len < HTT_RX_FRAG_IND_BYTES) {
310 			qdf_print("invalid nbuf len");
311 			WARN_ON(1);
312 			break;
313 		}
314 		peer_id = HTT_RX_FRAG_IND_PEER_ID_GET(*msg_word);
315 		tid = HTT_RX_FRAG_IND_EXT_TID_GET(*msg_word);
316 		htt_rx_frag_set_last_msdu(pdev, htt_t2h_msg);
317 
318 		/* If the packet length is invalid, discard this frame. */
319 		if (pdev->cfg.is_high_latency) {
320 			u_int32_t rx_pkt_len = 0;
321 
322 			rx_pkt_len = qdf_nbuf_len(htt_t2h_msg);
323 
324 			if (rx_pkt_len < (HTT_RX_FRAG_IND_BYTES +
325 				sizeof(struct hl_htt_rx_ind_base)+
326 				sizeof(struct ieee80211_frame))) {
327 
328 				qdf_print("invalid packet len, %u", rx_pkt_len);
329 				/*
330 				 * This buf will be freed before
331 				 * exiting this function.
332 				 */
333 				break;
334 			}
335 		}
336 
337 		ol_rx_frag_indication_handler(pdev->txrx_pdev,
338 					      htt_t2h_msg,
339 					      peer_id, tid);
340 
341 		if (pdev->cfg.is_high_latency) {
342 			/*
343 			 * For the high latency solution, the
344 			 * HTT_T2H_MSG_TYPE_RX_FRAG_IND message and the RX
345 			 * packet share the same buffer. The buffer will be
346 			 * freed by ol_rx_frag_indication_handler or by the
347 			 * upper layer, to avoid a double free.
348 			 */
350 			return;
351 		}
352 
353 		break;
354 	}
355 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
356 	{
357 		uint16_t peer_id;
358 		uint8_t tid;
359 		uint8_t win_sz;
360 		uint16_t start_seq_num;
361 
362 		/*
363 		 * FOR NOW, the host doesn't need to know the initial
364 		 * sequence number for rx aggregation.
365 		 * Thus, any value will do - specify 0.
366 		 */
367 		start_seq_num = 0;
368 		peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
369 		tid = HTT_RX_ADDBA_TID_GET(*msg_word);
370 		win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
371 		ol_rx_addba_handler(pdev->txrx_pdev, peer_id, tid,
372 				    win_sz, start_seq_num,
373 				    0 /* success */);
374 		break;
375 	}
376 	case HTT_T2H_MSG_TYPE_RX_DELBA:
377 	{
378 		uint16_t peer_id;
379 		uint8_t tid;
380 
381 		peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
382 		tid = HTT_RX_DELBA_TID_GET(*msg_word);
383 		ol_rx_delba_handler(pdev->txrx_pdev, peer_id, tid);
384 		break;
385 	}
386 	case HTT_T2H_MSG_TYPE_PEER_MAP:
387 	{
388 		uint8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
389 		uint8_t *peer_mac_addr;
390 		uint16_t peer_id;
391 		uint8_t vdev_id;
392 
393 		if (qdf_nbuf_len(htt_t2h_msg) < HTT_RX_PEER_MAP_BYTES) {
394 			qdf_print("invalid nbuf len");
395 			WARN_ON(1);
396 			break;
397 		}
398 
399 		peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
400 		vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
401 		peer_mac_addr = htt_t2h_mac_addr_deswizzle(
402 			(uint8_t *) (msg_word + 1),
403 			&mac_addr_deswizzle_buf[0]);
404 
405 		if (peer_id > ol_cfg_max_peer_id(pdev->ctrl_pdev)) {
406 			qdf_print("%s: HTT_T2H_MSG_TYPE_PEER_MAP, "
407 				"invalid peer_id %u\n",
408 				__FUNCTION__,
409 				peer_id);
410 			break;
411 		}
412 
413 		ol_rx_peer_map_handler(pdev->txrx_pdev, peer_id,
414 				       vdev_id, peer_mac_addr,
415 				       1 /*can tx */);
416 		break;
417 	}
418 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
419 	{
420 		uint16_t peer_id;
421 
422 		if (qdf_nbuf_len(htt_t2h_msg) < HTT_RX_PEER_UNMAP_BYTES) {
423 			qdf_print("invalid nbuf len");
424 			WARN_ON(1);
425 			break;
426 		}
427 
428 		peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
429 		if (peer_id > ol_cfg_max_peer_id(pdev->ctrl_pdev)) {
430 			qdf_print("%s: HTT_T2H_MSG_TYPE_PEER_UNMAP, "
431 				"invalid peer_id %u\n",
432 				__FUNCTION__,
433 				peer_id);
434 			break;
435 		}
436 
437 		ol_rx_peer_unmap_handler(pdev->txrx_pdev, peer_id);
438 		break;
439 	}
440 	case HTT_T2H_MSG_TYPE_SEC_IND:
441 	{
442 		uint16_t peer_id;
443 		enum htt_sec_type sec_type;
444 		int is_unicast;
445 
446 		if (qdf_nbuf_len(htt_t2h_msg) < HTT_SEC_IND_BYTES) {
447 			qdf_print("invalid nbuf len");
448 			WARN_ON(1);
449 			break;
450 		}
451 
452 		peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
453 		sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
454 		is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
455 		msg_word++;   /* point to the first part of the Michael key */
456 		ol_rx_sec_ind_handler(pdev->txrx_pdev, peer_id,
457 				      sec_type, is_unicast, msg_word,
458 				      msg_word + 2);
459 		break;
460 	}
461 	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND:
462 	{
463 		struct htt_mgmt_tx_compl_ind *compl_msg;
464 		int32_t credit_delta = 1;
465 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
466 		if (msg_len < (sizeof(struct htt_mgmt_tx_compl_ind) + sizeof(*msg_word))) {
467 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
468 				  "Invalid msg_word length in HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND");
469 			WARN_ON(1);
470 			break;
471 		}
472 
473 		compl_msg =
474 			(struct htt_mgmt_tx_compl_ind *)(msg_word + 1);
475 
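		/*
		 * For high-latency targets that do not request per-frame tx
		 * completions, account the returned credit through the
		 * locally tracked target delta before crediting the txrx pdev.
		 */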
476 		if (pdev->cfg.is_high_latency) {
477 			if (!pdev->cfg.default_tx_comp_req) {
478 				HTT_TX_MUTEX_ACQUIRE(&pdev->credit_mutex);
479 				qdf_atomic_add(credit_delta,
480 					       &pdev->htt_tx_credit.
481 								target_delta);
482 				credit_delta = htt_tx_credit_update(pdev);
483 				HTT_TX_MUTEX_RELEASE(&pdev->credit_mutex);
484 			}
485 			if (credit_delta)
486 				ol_tx_target_credit_update(
487 						pdev->txrx_pdev, credit_delta);
488 		}
489 		ol_tx_desc_update_group_credit(
490 			pdev->txrx_pdev, compl_msg->desc_id, 1,
491 			0, compl_msg->status);
492 
493 		DPTRACE(qdf_dp_trace_credit_record(QDF_TX_COMP, QDF_CREDIT_INC,
494 			1, qdf_atomic_read(&pdev->txrx_pdev->target_tx_credit),
495 			qdf_atomic_read(&pdev->txrx_pdev->txq_grps[0].credit),
496 			qdf_atomic_read(&pdev->txrx_pdev->txq_grps[1].credit)));
497 
498 		if (!ol_tx_get_is_mgmt_over_wmi_enabled()) {
499 			ol_tx_single_completion_handler(pdev->txrx_pdev,
500 							compl_msg->status,
501 							compl_msg->desc_id);
502 			htc_pm_runtime_put(pdev->htc_pdev);
503 			HTT_TX_SCHED(pdev);
504 		} else {
505 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
506 				  "Ignoring HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND indication");
507 		}
508 		break;
509 	}
510 	case HTT_T2H_MSG_TYPE_STATS_CONF:
511 	{
512 		uint8_t cookie;
513 		uint8_t *stats_info_list;
514 
515 		cookie = *(msg_word + 1);
516 
517 		stats_info_list = (uint8_t *) (msg_word + 3);
518 		htc_pm_runtime_put(pdev->htc_pdev);
519 		ol_txrx_fw_stats_handler(pdev->txrx_pdev, cookie,
520 					 stats_info_list);
521 		break;
522 	}
523 #ifndef REMOVE_PKT_LOG
524 	case HTT_T2H_MSG_TYPE_PKTLOG:
525 	{
526 		uint32_t len = qdf_nbuf_len(htt_t2h_msg);
527 
528 		if (len < sizeof(*msg_word) + sizeof(uint32_t)) {
529 			qdf_print("invalid nbuf len");
530 			WARN_ON(1);
531 			break;
532 		}
533 
534 		/* len is reduced by sizeof(*msg_word) */
535 		pktlog_process_fw_msg(OL_TXRX_PDEV_ID, msg_word + 1,
536 				      len - sizeof(*msg_word));
537 		break;
538 	}
539 #endif
540 	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
541 	{
542 		uint32_t htt_credit_delta_abs;
543 		int32_t htt_credit_delta;
544 		int sign, old_credit;
545 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
546 
547 		if (msg_len < HTT_TX_CREDIT_MSG_BYTES) {
548 			qdf_print("invalid nbuf len");
549 			WARN_ON(1);
550 			break;
551 		}
552 
553 		htt_credit_delta_abs =
554 			HTT_TX_CREDIT_DELTA_ABS_GET(*msg_word);
555 		sign = HTT_TX_CREDIT_SIGN_BIT_GET(*msg_word) ? -1 : 1;
556 		htt_credit_delta = sign * htt_credit_delta_abs;
557 
558 		old_credit = qdf_atomic_read(&pdev->htt_tx_credit.target_delta);
559 		if (((old_credit + htt_credit_delta) > MAX_TARGET_TX_CREDIT) ||
560 			((old_credit + htt_credit_delta) < -MAX_TARGET_TX_CREDIT)) {
561 			qdf_err("invalid update, old_credit=%d, htt_credit_delta=%d",
562 				old_credit, htt_credit_delta);
563 			break;
564 		}
565 		htt_credit_delta =
566 			htt_t2h_adjust_bus_target_delta(pdev, htt_credit_delta);
567 		htt_tx_group_credit_process(pdev, msg_word);
568 		DPTRACE(qdf_dp_trace_credit_record(QDF_TX_CREDIT_UPDATE,
569 			QDF_CREDIT_INC,	htt_credit_delta,
570 			qdf_atomic_read(&pdev->txrx_pdev->target_tx_credit) +
571 			htt_credit_delta,
572 			qdf_atomic_read(&pdev->txrx_pdev->txq_grps[0].credit),
573 			qdf_atomic_read(&pdev->txrx_pdev->txq_grps[1].credit)));
574 
575 		ol_tx_credit_completion_handler(pdev->txrx_pdev,
576 						htt_credit_delta);
577 		break;
578 	}
579 
580 	case HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE:
581 	{
582 		uint16_t len;
583 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
584 		len = HTT_WDI_IPA_OP_RESPONSE_RSP_LEN_GET(*(msg_word + 1));
585 
586 		if (sizeof(struct htt_wdi_ipa_op_response_t) + len > msg_len) {
587 			qdf_print("Invalid buf len size %zu len %d, msg_len %d",
588 				  sizeof(struct htt_wdi_ipa_op_response_t),
589 				  len, msg_len);
590 			WARN_ON(1);
591 			break;
592 		}
593 		htt_ipa_op_response(pdev, msg_word);
594 		break;
595 	}
596 
597 	case HTT_T2H_MSG_TYPE_FLOW_POOL_MAP:
598 	{
599 		uint8_t num_flows;
600 		struct htt_flow_pool_map_payload_t *pool_map_payload;
601 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
602 
603 		num_flows = HTT_FLOW_POOL_MAP_NUM_FLOWS_GET(*msg_word);
604 
605 		if (((HTT_FLOW_POOL_MAP_PAYLOAD_SZ /
606 			HTT_FLOW_POOL_MAP_HEADER_SZ) * num_flows + 1) * sizeof(*msg_word) > msg_len) {
607 			qdf_print("Invalid num_flows");
608 			WARN_ON(1);
609 			break;
610 		}
611 
612 		msg_word++;
613 		while (num_flows) {
614 			pool_map_payload = (struct htt_flow_pool_map_payload_t *)
615 								msg_word;
616 			ol_tx_flow_pool_map_handler(pool_map_payload->flow_id,
617 					pool_map_payload->flow_type,
618 					pool_map_payload->flow_pool_id,
619 					pool_map_payload->flow_pool_size);
620 
621 			msg_word += (HTT_FLOW_POOL_MAP_PAYLOAD_SZ /
622 						 HTT_FLOW_POOL_MAP_HEADER_SZ);
623 			num_flows--;
624 		}
625 		break;
626 	}
627 
628 	case HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP:
629 	{
630 		struct htt_flow_pool_unmap_t *pool_unmap_payload;
631 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
632 
633 		if (msg_len < sizeof(struct htt_flow_pool_unmap_t)) {
634 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
635 				  "Invalid msg_word length in HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP");
636 			WARN_ON(1);
637 			break;
638 		}
639 
640 		pool_unmap_payload = (struct htt_flow_pool_unmap_t *)msg_word;
641 		ol_tx_flow_pool_unmap_handler(pool_unmap_payload->flow_id,
642 					pool_unmap_payload->flow_type,
643 					pool_unmap_payload->flow_pool_id);
644 		break;
645 	}
646 
647 	case HTT_T2H_MSG_TYPE_FLOW_POOL_RESIZE:
648 	{
649 		struct htt_flow_pool_resize_t *msg;
650 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
651 
652 		if (msg_len < sizeof(struct htt_flow_pool_resize_t)) {
653 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
654 				  "Invalid msg_word length in HTT_T2H_MSG_TYPE_FLOW_POOL_RESIZE");
655 			WARN_ON(1);
656 			break;
657 		}
658 
659 		msg = (struct htt_flow_pool_resize_t *)msg_word;
660 		ol_tx_flow_pool_resize_handler(msg->flow_pool_id,
661 					       msg->flow_pool_new_size);
662 
663 		break;
664 	}
665 
666 	case HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR:
667 	{
668 		switch (HTT_RX_OFLD_PKT_ERR_MSG_SUB_TYPE_GET(*msg_word)) {
669 		case HTT_RX_OFLD_PKT_ERR_TYPE_MIC_ERR:
670 		{
671 			struct ol_txrx_vdev_t *vdev;
672 			struct ol_txrx_peer_t *peer;
673 			uint64_t pn;
674 			uint32_t key_id;
675 			uint16_t peer_id;
676 			int msg_len = qdf_nbuf_len(htt_t2h_msg);
677 
678 			if (msg_len < HTT_RX_OFLD_PKT_ERR_MIC_ERR_BYTES) {
679 				qdf_print("invalid nbuf len");
680 				WARN_ON(1);
681 				break;
682 			}
683 
684 			peer_id = HTT_RX_OFLD_PKT_ERR_MIC_ERR_PEER_ID_GET
685 				(*(msg_word + 1));
686 
687 			peer = ol_txrx_peer_find_by_id(pdev->txrx_pdev,
688 				 peer_id);
689 			if (!peer) {
690 				qdf_print("invalid peer id %d", peer_id);
691 				qdf_assert(0);
692 				break;
693 			}
694 			vdev = peer->vdev;
695 			key_id = HTT_RX_OFLD_PKT_ERR_MIC_ERR_KEYID_GET
696 				(*(msg_word + 1));
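			/* 48-bit packet number reported with the MIC error */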
697 			qdf_mem_copy(&pn, (uint8_t *)(msg_word + 6), 6);
698 
699 			ol_rx_send_mic_err_ind(vdev->pdev, vdev->vdev_id,
700 					       peer->mac_addr.raw, 0, 0,
701 					       OL_RX_ERR_TKIP_MIC, htt_t2h_msg,
702 					       &pn, key_id);
703 			break;
704 		}
705 		default:
706 		{
707 			qdf_print("unhandled error type %d",
708 			    HTT_RX_OFLD_PKT_ERR_MSG_SUB_TYPE_GET(*msg_word));
709 			break;
710 		}
711 		}
		break;
712 	}
713 #ifdef WLAN_CFR_ENABLE
714 	case HTT_T2H_MSG_TYPE_CFR_DUMP_COMPL_IND:
715 	{
716 		int expected_len;
717 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
718 
719 		expected_len = HTT_CFR_DUMP_COMPL_HEAD_SZ +
720 				sizeof(struct htt_cfr_dump_compl_ind);
721 		if (msg_len < expected_len) {
722 			qdf_print("Invalid length of CFR capture event");
723 			break;
724 		}
725 
726 		ol_rx_cfr_capture_msg_handler(htt_t2h_msg);
727 		break;
728 	}
729 #endif
730 	default:
731 		break;
732 	};
733 	/* Free the indication buffer */
734 	if (free_msg_buf)
735 		qdf_nbuf_free(htt_t2h_msg);
736 }
737 
738 #define HTT_TX_COMPL_HEAD_SZ			4
739 #define HTT_TX_COMPL_BYTES_PER_MSDU_ID		2
740 
741 /**
742  * htt_t2h_msg_handler() - Generic target->host HTT msg/event handler
743  * Low priority messages are handled by a separate handler called from
744  * this function, so that the most likely success paths (rx and tx
745  * completion) have a small code footprint.
746  */
747 void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
748 {
749 	struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
750 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
751 	uint32_t *msg_word;
752 	enum htt_t2h_msg_type msg_type;
753 
754 	/* check for successful message reception */
755 	if (pkt->Status != QDF_STATUS_SUCCESS) {
756 		if (pkt->Status != QDF_STATUS_E_CANCELED)
757 			pdev->stats.htc_err_cnt++;
758 		qdf_nbuf_free(htt_t2h_msg);
759 		return;
760 	}
761 #ifdef HTT_RX_RESTORE
762 	if (qdf_unlikely(pdev->rx_ring.rx_reset)) {
763 		qdf_print("rx restore ..\n");
764 		qdf_nbuf_free(htt_t2h_msg);
765 		return;
766 	}
767 #endif
768 
769 	/* confirm alignment */
770 	HTT_ASSERT3((((unsigned long)qdf_nbuf_data(htt_t2h_msg)) & 0x3) == 0);
771 
772 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
773 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
774 
775 #if defined(HELIUMPLUS_DEBUG)
776 	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
777 		  "%s %d: msg_word 0x%x msg_type %d", __func__, __LINE__,
778 		  *msg_word, msg_type);
779 #endif
780 
781 	switch (msg_type) {
782 	case HTT_T2H_MSG_TYPE_RX_IND:
783 	{
784 		unsigned int num_mpdu_ranges;
785 		unsigned int num_msdu_bytes;
786 		unsigned int calculated_msg_len;
787 		unsigned int rx_mpdu_range_offset_bytes;
788 		uint16_t peer_id;
789 		uint8_t tid;
790 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
791 
792 		if (qdf_unlikely(pdev->cfg.is_full_reorder_offload)) {
793 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND not supported with full reorder offload");
795 			break;
796 		}
797 		peer_id = HTT_RX_IND_PEER_ID_GET(*msg_word);
798 		tid = HTT_RX_IND_EXT_TID_GET(*msg_word);
799 
800 		if (tid >= OL_TXRX_NUM_EXT_TIDS) {
801 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid tid %d\n",
802 				tid);
803 			break;
804 		}
805 		if (msg_len < (2 + HTT_RX_PPDU_DESC_SIZE32 + 1) * sizeof(uint32_t)) {
806 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid msg_len\n");
807 			break;
808 		}
809 		num_msdu_bytes =
810 			HTT_RX_IND_FW_RX_DESC_BYTES_GET(
811 				*(msg_word + 2 + HTT_RX_PPDU_DESC_SIZE32));
812 		/*
813 		 * 1 word for the message header,
814 		 * HTT_RX_PPDU_DESC_SIZE32 words for the FW rx PPDU desc
815 		 * 1 word to specify the number of MSDU bytes,
816 		 * 1 word for every 4 MSDU bytes (round up),
817 		 * 1 word for the MPDU range header
818 		 */
819 		rx_mpdu_range_offset_bytes =
820 			(HTT_RX_IND_HDR_BYTES + num_msdu_bytes + 3);
821 		if (qdf_unlikely(num_msdu_bytes >
822 				 rx_mpdu_range_offset_bytes)) {
823 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid %s %u\n",
824 				  "num_msdu_bytes",
825 				  num_msdu_bytes);
826 			WARN_ON(1);
827 			break;
828 		}
829 		pdev->rx_mpdu_range_offset_words =
830 			rx_mpdu_range_offset_bytes >> 2;
831 		num_mpdu_ranges =
832 			HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word + 1));
833 		pdev->rx_ind_msdu_byte_idx = 0;
834 		if (qdf_unlikely(rx_mpdu_range_offset_bytes >
835 		    msg_len)) {
836 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid %s %d\n",
837 				  "rx_mpdu_range_offset_words",
838 				  pdev->rx_mpdu_range_offset_words);
839 			WARN_ON(1);
840 			break;
841 		}
842 		calculated_msg_len = rx_mpdu_range_offset_bytes +
843 			(num_mpdu_ranges * (int)sizeof(uint32_t));
844 		/*
845 		 * Check that the addition and multiplication
846 		 * do not cause integer overflow
847 		 */
848 		if (qdf_unlikely(calculated_msg_len <
849 		    rx_mpdu_range_offset_bytes)) {
850 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid %s %u\n",
851 				  "num_mpdu_ranges",
852 				  (num_mpdu_ranges * (int)sizeof(uint32_t)));
853 			WARN_ON(1);
854 			break;
855 		}
856 		if (qdf_unlikely(calculated_msg_len > msg_len)) {
857 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid %s %u\n",
858 				  "offset_words + mpdu_ranges",
859 				  calculated_msg_len);
860 			WARN_ON(1);
861 			break;
862 		}
863 		ol_rx_indication_handler(pdev->txrx_pdev,
864 					 htt_t2h_msg, peer_id,
865 					 tid, num_mpdu_ranges);
866 
867 		if (pdev->cfg.is_high_latency)
868 			return;
869 
870 		break;
871 	}
872 	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
873 	{
874 		int old_credit;
875 		int num_msdus;
876 		enum htt_tx_status status;
877 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
878 
879 		/* status - no enum translation needed */
880 		status = HTT_TX_COMPL_IND_STATUS_GET(*msg_word);
881 		num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
882 
883 		/*
884 		 * each desc id will occupy 2 bytes.
885 		 * the 4 is for htt msg header
886 		 */
887 		if ((num_msdus * HTT_TX_COMPL_BYTES_PER_MSDU_ID +
888 			HTT_TX_COMPL_HEAD_SZ) > msg_len) {
889 			qdf_print("%s: num_msdus(%d) is invalid, "
890 				"msg_len = %d\n",
891 				__FUNCTION__,
892 				num_msdus,
893 				msg_len);
894 			break;
895 		}
896 
897 		if (num_msdus & 0x1) {
898 			struct htt_tx_compl_ind_base *compl =
899 				(void *)msg_word;
900 
901 			/*
902 			 * Host CPU endianness can be different from FW CPU.
903 			 * This can result in even and odd MSDU IDs being
904 			 * switched. If this happens, copy the switched final
905 			 * odd MSDU ID from location payload[size], to
906 			 * location payload[size-1], where the message
907 			 * handler function expects to find it
908 			 */
909 			if (compl->payload[num_msdus] !=
910 			    HTT_TX_COMPL_INV_MSDU_ID) {
911 				compl->payload[num_msdus - 1] =
912 					compl->payload[num_msdus];
913 			}
914 		}
915 
916 		if (pdev->cfg.is_high_latency &&
917 		    !pdev->cfg.credit_update_enabled) {
918 			old_credit = qdf_atomic_read(
919 						&pdev->htt_tx_credit.target_delta);
920 			if (((old_credit + num_msdus) > MAX_TARGET_TX_CREDIT) ||
921 				((old_credit + num_msdus) < -MAX_TARGET_TX_CREDIT)) {
922 				qdf_err("invalid update, old_credit=%d, num_msdus=%d",
923 					old_credit, num_msdus);
924 			} else {
925 				if (!pdev->cfg.default_tx_comp_req) {
926 					int credit_delta;
927 
928 					HTT_TX_MUTEX_ACQUIRE(&pdev->credit_mutex);
929 					qdf_atomic_add(num_msdus,
930 						       &pdev->htt_tx_credit.
931 							target_delta);
932 					credit_delta = htt_tx_credit_update(pdev);
933 					HTT_TX_MUTEX_RELEASE(&pdev->credit_mutex);
934 
935 					if (credit_delta) {
936 						ol_tx_target_credit_update(
937 								pdev->txrx_pdev,
938 								credit_delta);
939 					}
940 				} else {
941 					ol_tx_target_credit_update(pdev->txrx_pdev,
942 								   num_msdus);
943 				}
944 			}
945 		}
946 
947 		ol_tx_completion_handler(pdev->txrx_pdev, num_msdus,
948 					 status, msg_word);
949 		HTT_TX_SCHED(pdev);
950 		break;
951 	}
952 	case HTT_T2H_MSG_TYPE_RX_PN_IND:
953 	{
954 		uint16_t peer_id;
955 		uint8_t tid, pn_ie_cnt, *pn_ie = NULL;
956 		uint16_t seq_num_start, seq_num_end;
957 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
958 
959 		if (msg_len < HTT_RX_PN_IND_BYTES) {
960 			qdf_print("invalid nbuf len");
961 			WARN_ON(1);
962 			break;
963 		}
964 
965 		/*First dword */
966 		peer_id = HTT_RX_PN_IND_PEER_ID_GET(*msg_word);
967 		tid = HTT_RX_PN_IND_EXT_TID_GET(*msg_word);
968 
969 		msg_word++;
970 		/*Second dword */
971 		seq_num_start =
972 			HTT_RX_PN_IND_SEQ_NUM_START_GET(*msg_word);
973 		seq_num_end = HTT_RX_PN_IND_SEQ_NUM_END_GET(*msg_word);
974 		pn_ie_cnt = HTT_RX_PN_IND_PN_IE_CNT_GET(*msg_word);
975 
976 		if (msg_len - HTT_RX_PN_IND_BYTES <
977 		    pn_ie_cnt * sizeof(uint8_t)) {
978 			qdf_print("invalid pn_ie count");
979 			WARN_ON(1);
980 			break;
981 		}
982 
983 		msg_word++;
984 		/*Third dword */
985 		if (pn_ie_cnt)
986 			pn_ie = (uint8_t *) msg_word;
987 
988 		ol_rx_pn_ind_handler(pdev->txrx_pdev, peer_id, tid,
989 				     seq_num_start, seq_num_end,
990 				     pn_ie_cnt, pn_ie);
991 
992 		break;
993 	}
994 	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
995 	{
996 		int num_msdus;
997 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
998 
999 		num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
1000 		/*
1001 		 * each desc id will occupy 2 bytes.
1002 		 * the 4 is for htt msg header
1003 		 */
1004 		if ((num_msdus * HTT_TX_COMPL_BYTES_PER_MSDU_ID +
1005 			HTT_TX_COMPL_HEAD_SZ) > msg_len) {
1006 			qdf_print("%s: num_msdus(%d) is invalid, "
1007 				"msg_len = %d\n",
1008 				__FUNCTION__,
1009 				num_msdus,
1010 				msg_len);
1011 			break;
1012 		}
1013 
1014 		if (num_msdus & 0x1) {
1015 			struct htt_tx_compl_ind_base *compl =
1016 				(void *)msg_word;
1017 
1018 			/*
1019 			 * Host CPU endianness can be different from FW CPU.
1020 			 * This can result in even and odd MSDU IDs being
1021 			 * switched. If this happens, copy the switched final
1022 			 * odd MSDU ID from location payload[size], to
1023 			 * location payload[size-1], where the message handler
1024 			 * function expects to find it
1025 			 */
1026 			if (compl->payload[num_msdus] !=
1027 			    HTT_TX_COMPL_INV_MSDU_ID) {
1028 				compl->payload[num_msdus - 1] =
1029 					compl->payload[num_msdus];
1030 			}
1031 		}
1032 		ol_tx_inspect_handler(pdev->txrx_pdev, num_msdus,
1033 				      msg_word + 1);
1034 		HTT_TX_SCHED(pdev);
1035 		break;
1036 	}
1037 	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND:
1038 	{
1039 		uint16_t peer_id;
1040 		uint8_t tid;
1041 		uint8_t offload_ind, frag_ind;
1042 
1043 		if (qdf_unlikely(!pdev->cfg.is_full_reorder_offload)) {
1044 			qdf_print("full reorder offload is disabled");
1045 			break;
1046 		}
1047 
1048 		if (qdf_unlikely(pdev->cfg.is_high_latency)) {
1049 			qdf_print("full reorder offload not supported in HL");
1050 			break;
1051 		}
1052 
1053 		peer_id = HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(*msg_word);
1054 		tid = HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(*msg_word);
1055 		offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);
1056 		frag_ind = HTT_RX_IN_ORD_PADDR_IND_FRAG_GET(*msg_word);
1057 
1058 #if defined(HELIUMPLUS_DEBUG)
1059 		qdf_print("peerid %d tid %d offloadind %d fragind %d",
1060 			  peer_id, tid, offload_ind,
1061 			  frag_ind);
1062 #endif
1063 		if (qdf_unlikely(frag_ind)) {
1064 			ol_rx_frag_indication_handler(pdev->txrx_pdev,
1065 						      htt_t2h_msg,
1066 						      peer_id, tid);
1067 			break;
1068 		}
1069 
1070 		ol_rx_in_order_indication_handler(pdev->txrx_pdev,
1071 						  htt_t2h_msg, peer_id,
1072 						  tid, offload_ind);
1073 		break;
1074 	}
1075 
1076 	default:
1077 		htt_t2h_lp_msg_handler(context, htt_t2h_msg, true);
1078 		return;
1079 
1080 	};
1081 
1082 	/* Free the indication buffer */
1083 	qdf_nbuf_free(htt_t2h_msg);
1084 }
1085 
1086 #ifdef WLAN_FEATURE_FASTPATH
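/*
 * Restore a T2H message buffer for reuse: rewind the nbuf DMA address to
 * before the HTC header that was pulled off, re-initialize the nbuf fields,
 * and sync the buffer back to the device.
 */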
1087 #define HTT_T2H_MSG_BUF_REINIT(_buf, dev)				\
1088 	do {								\
1089 		QDF_NBUF_CB_PADDR(_buf) -= (HTC_HEADER_LEN +		\
1090 					HTC_HDR_ALIGNMENT_PADDING);	\
1091 		qdf_nbuf_init_fast((_buf));				\
1092 		qdf_mem_dma_sync_single_for_device(dev,			\
1093 					(QDF_NBUF_CB_PADDR(_buf)),	\
1094 					(skb_end_pointer(_buf) -	\
1095 					(_buf)->data),			\
1096 					PCI_DMA_FROMDEVICE);		\
1097 	} while (0)
1098 
1099 /**
1100  * htt_t2h_msg_handler_fast() -  Fastpath specific message handler
1101  * @context: HTT context
1102  * @cmpl_msdus: netbuf completions
1103  * @num_cmpls: number of completions to be handled
1104  *
1105  * Return: None
1106  */
1107 void htt_t2h_msg_handler_fast(void *context, qdf_nbuf_t *cmpl_msdus,
1108 			      uint32_t num_cmpls)
1109 {
1110 	struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
1111 	qdf_nbuf_t htt_t2h_msg;
1112 	uint32_t *msg_word;
1113 	uint32_t i;
1114 	enum htt_t2h_msg_type msg_type;
1115 	uint32_t msg_len;
1116 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
1117 
1118 	for (i = 0; i < num_cmpls; i++) {
1119 		htt_t2h_msg = cmpl_msdus[i];
1120 		msg_len = qdf_nbuf_len(htt_t2h_msg);
1121 
1122 		/*
1123 		 * Move the data pointer to point to HTT header
1124 		 * past the HTC header + HTC header alignment padding
1125 		 */
1126 		qdf_nbuf_pull_head(htt_t2h_msg, HTC_HEADER_LEN +
1127 				   HTC_HDR_ALIGNMENT_PADDING);
1128 
1129 		/* confirm alignment */
1130 		HTT_ASSERT3((((unsigned long) qdf_nbuf_data(htt_t2h_msg)) & 0x3)
1131 			    == 0);
1132 
1133 		msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
1134 		msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
1135 
1136 		switch (msg_type) {
1137 		case HTT_T2H_MSG_TYPE_RX_IND:
1138 		{
1139 			unsigned int num_mpdu_ranges;
1140 			unsigned int num_msdu_bytes;
1141 			unsigned int calculated_msg_len;
1142 			unsigned int rx_mpdu_range_offset_bytes;
1143 			u_int16_t peer_id;
1144 			u_int8_t tid;
1145 			msg_len = qdf_nbuf_len(htt_t2h_msg);
1146 
1147 			peer_id = HTT_RX_IND_PEER_ID_GET(*msg_word);
1148 			tid = HTT_RX_IND_EXT_TID_GET(*msg_word);
1149 			if (tid >= OL_TXRX_NUM_EXT_TIDS) {
1150 				qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid tid %d\n",
1151 					tid);
1152 				WARN_ON(1);
1153 				break;
1154 			}
1155 			num_msdu_bytes =
1156 				HTT_RX_IND_FW_RX_DESC_BYTES_GET(
1157 				*(msg_word + 2 +
1158 				  HTT_RX_PPDU_DESC_SIZE32));
1159 			/*
1160 			 * 1 word for the message header,
1161 			 * HTT_RX_PPDU_DESC_SIZE32 words for the FW
1162 			 * rx PPDU desc.
1163 			 * 1 word to specify the number of MSDU bytes,
1164 			 * 1 word for every 4 MSDU bytes (round up),
1165 			 * 1 word for the MPDU range header
1166 			 */
1167 			rx_mpdu_range_offset_bytes =
1168 				(HTT_RX_IND_HDR_BYTES + num_msdu_bytes + 3);
1169 			if (qdf_unlikely(num_msdu_bytes >
1170 					 rx_mpdu_range_offset_bytes)) {
1171 				qdf_print("HTT_T2H_MSG_TYPE_RX_IND, %s %u\n",
1172 					  "invalid num_msdu_bytes",
1173 					  num_msdu_bytes);
1174 				WARN_ON(1);
1175 				break;
1176 			}
1177 			pdev->rx_mpdu_range_offset_words =
1178 				rx_mpdu_range_offset_bytes >> 2;
1179 			num_mpdu_ranges =
1180 				HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word
1181 								 + 1));
1182 			pdev->rx_ind_msdu_byte_idx = 0;
1183 			if (qdf_unlikely(rx_mpdu_range_offset_bytes >
1184 					 msg_len)) {
1185 				qdf_print("HTT_T2H_MSG_TYPE_RX_IND, %s %d\n",
1186 					  "invalid rx_mpdu_range_offset_words",
1187 					  pdev->rx_mpdu_range_offset_words);
1188 				WARN_ON(1);
1189 				break;
1190 			}
1191 			calculated_msg_len = rx_mpdu_range_offset_bytes +
1192 					     (num_mpdu_ranges *
1193 					     (int)sizeof(uint32_t));
1194 			/*
1195 			 * Check that the addition and multiplication
1196 			 * do not cause integer overflow
1197 			 */
1198 			if (qdf_unlikely(calculated_msg_len <
1199 					 rx_mpdu_range_offset_bytes)) {
1200 				qdf_print("HTT_T2H_MSG_TYPE_RX_IND, %s %u\n",
1201 					  "invalid num_mpdu_ranges",
1202 					  (num_mpdu_ranges *
1203 					   (int)sizeof(uint32_t)));
1204 				WARN_ON(1);
1205 				break;
1206 			}
1207 			if (qdf_unlikely(calculated_msg_len > msg_len)) {
1208 				qdf_print("HTT_T2H_MSG_TYPE_RX_IND, %s %u\n",
1209 					  "invalid offset_words + mpdu_ranges",
1210 					  calculated_msg_len);
1211 				WARN_ON(1);
1212 				break;
1213 			}
1214 			ol_rx_indication_handler(pdev->txrx_pdev, htt_t2h_msg,
1215 						 peer_id, tid, num_mpdu_ranges);
1216 			break;
1217 		}
1218 		case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
1219 		{
1220 			int num_msdus;
1221 			enum htt_tx_status status;
1222 
1223 			/* status - no enum translation needed */
1224 			status = HTT_TX_COMPL_IND_STATUS_GET(*msg_word);
1225 			num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
1226 
1227 			/*
1228 			 * each desc id will occupy 2 bytes.
1229 			 * the 4 is for htt msg header
1230 			 */
1231 			if ((num_msdus * HTT_TX_COMPL_BYTES_PER_MSDU_ID +
1232 				HTT_TX_COMPL_HEAD_SZ) > msg_len) {
1233 				qdf_print("%s: num_msdus(%d) is invalid, "
1234 					"msg_len = %d\n",
1235 					__FUNCTION__,
1236 					num_msdus,
1237 					msg_len);
1238 				break;
1239 			}
1240 
1241 			if (num_msdus & 0x1) {
1242 				struct htt_tx_compl_ind_base *compl =
1243 					(void *)msg_word;
1244 
1245 				/*
1246 				 * Host CPU endianness can be different
1247 				 * from FW CPU. This can result in even
1248 				 * and odd MSDU IDs being switched. If
1249 				 * this happens, copy the switched final
1250 				 * odd MSDU ID from location
1251 				 * payload[size], to location
1252 				 * payload[size-1],where the message
1253 				 * handler function expects to find it
1254 				 */
1255 				if (compl->payload[num_msdus] !=
1256 				    HTT_TX_COMPL_INV_MSDU_ID) {
1257 					compl->payload[num_msdus - 1] =
1258 						compl->payload[num_msdus];
1259 				}
1260 			}
1261 			ol_tx_completion_handler(pdev->txrx_pdev, num_msdus,
1262 						 status, msg_word);
1263 
1264 			break;
1265 		}
1266 		case HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND:
1267 		{
1268 			struct htt_tx_offload_deliver_ind_hdr_t
1269 							*offload_deliver_msg;
1270 			uint8_t vdev_id;
1271 			struct ol_txrx_vdev_t *vdev;
1272 			bool is_pkt_during_roam = false;
1273 			struct ol_txrx_pdev_t *txrx_pdev = pdev->txrx_pdev;
1274 			struct ol_txrx_peer_t *peer;
1275 			uint8_t bssid[QDF_MAC_ADDR_SIZE];
1276 			uint32_t freq = 0;
1277 
1278 			if (!(ucfg_pkt_capture_get_pktcap_mode((void *)soc->psoc) &
1279 			      PKT_CAPTURE_MODE_DATA_ONLY))
1280 				break;
1281 
1282 			offload_deliver_msg =
1283 			(struct htt_tx_offload_deliver_ind_hdr_t *)msg_word;
1284 			is_pkt_during_roam =
1285 			(offload_deliver_msg->reserved_2 ? true : false);
1286 
1287 			if (qdf_unlikely(
1288 				!pdev->cfg.is_full_reorder_offload)) {
1289 				break;
1290 			}
1291 
1292 			/* Check whether FW sent this offload data during roaming */
1293 			if (is_pkt_during_roam) {
1294 				vdev_id = HTT_INVALID_VDEV;
1295 				freq =
1296 				(uint32_t)offload_deliver_msg->reserved_3;
1297 				htt_rx_mon_note_capture_channel(
1298 						pdev, cds_freq_to_chan(freq));
1299 			} else {
1300 				vdev_id = offload_deliver_msg->vdev_id;
1301 				vdev = (struct ol_txrx_vdev_t *)
1302 					ol_txrx_get_vdev_from_vdev_id(vdev_id);
1303 
1304 				if (vdev) {
1305 					qdf_spin_lock_bh(
1306 						&txrx_pdev->peer_ref_mutex);
1307 					peer = TAILQ_FIRST(&vdev->peer_list);
1308 					qdf_spin_unlock_bh(
1309 						&txrx_pdev->peer_ref_mutex);
1310 					if (peer) {
1311 						qdf_spin_lock_bh(
1312 							&peer->peer_info_lock);
1313 						qdf_mem_copy(
1314 							bssid,
1315 							&peer->mac_addr.raw,
1316 							QDF_MAC_ADDR_SIZE);
1317 						qdf_spin_unlock_bh(
1318 							&peer->peer_info_lock);
1319 					} else {
1320 						break;
1321 					}
1322 				} else {
1323 					break;
1324 				}
1325 			}
1326 			ucfg_pkt_capture_offload_deliver_indication_handler(
1327 							msg_word,
1328 							vdev_id, bssid, pdev);
1329 			break;
1330 		}
1331 		case HTT_T2H_MSG_TYPE_RX_PN_IND:
1332 		{
1333 			u_int16_t peer_id;
1334 			u_int8_t tid, pn_ie_cnt, *pn_ie = NULL;
1335 			int seq_num_start, seq_num_end;
1336 			int msg_len = qdf_nbuf_len(htt_t2h_msg);
1337 
1338 			if (msg_len < HTT_RX_PN_IND_BYTES) {
1339 				qdf_print("invalid nbuf len");
1340 				WARN_ON(1);
1341 				break;
1342 			}
1343 
1344 			/*First dword */
1345 			peer_id = HTT_RX_PN_IND_PEER_ID_GET(*msg_word);
1346 			tid = HTT_RX_PN_IND_EXT_TID_GET(*msg_word);
1347 
1348 			msg_word++;
1349 			/*Second dword */
1350 			seq_num_start =
1351 				HTT_RX_PN_IND_SEQ_NUM_START_GET(*msg_word);
1352 			seq_num_end =
1353 				HTT_RX_PN_IND_SEQ_NUM_END_GET(*msg_word);
1354 			pn_ie_cnt =
1355 				HTT_RX_PN_IND_PN_IE_CNT_GET(*msg_word);
1356 
1357 			if (msg_len - HTT_RX_PN_IND_BYTES <
1358 				pn_ie_cnt * sizeof(uint8_t)) {
1359 				qdf_print("invalid pn_ie len");
1360 				WARN_ON(1);
1361 				break;
1362 			}
1363 
1364 			msg_word++;
1365 			/*Third dword*/
1366 			if (pn_ie_cnt)
1367 				pn_ie = (u_int8_t *)msg_word;
1368 
1369 			ol_rx_pn_ind_handler(pdev->txrx_pdev, peer_id, tid,
1370 				seq_num_start, seq_num_end, pn_ie_cnt, pn_ie);
1371 
1372 			break;
1373 		}
1374 		case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
1375 		{
1376 			int num_msdus;
1377 
1378 			num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
1379 			/*
1380 			 * each desc id will occupy 2 bytes.
1381 			 * the 4 is for htt msg header
1382 			 */
1383 			if ((num_msdus * HTT_TX_COMPL_BYTES_PER_MSDU_ID +
1384 				HTT_TX_COMPL_HEAD_SZ) > msg_len) {
1385 				qdf_print("%s: num_msdus(%d) is invalid, "
1386 					"msg_len = %d\n",
1387 					__FUNCTION__,
1388 					num_msdus,
1389 					msg_len);
1390 				break;
1391 			}
1392 
1393 			if (num_msdus & 0x1) {
1394 				struct htt_tx_compl_ind_base *compl =
1395 					(void *)msg_word;
1396 
1397 				/*
1398 				 * Host CPU endianness can be different
1399 				 * from FW CPU. This can result in
1400 				 * even and odd MSDU IDs being switched.
1401 				 * If this happens, copy the switched
1402 				 * final odd MSDU ID from location
1403 				 * payload[size], to location
1404 				 * payload[size-1], where the message
1405 				 * handler function expects to find it
1406 				 */
1407 				if (compl->payload[num_msdus] !=
1408 				    HTT_TX_COMPL_INV_MSDU_ID) {
1409 					compl->payload[num_msdus - 1] =
1410 					compl->payload[num_msdus];
1411 				}
1412 			}
1413 			ol_tx_inspect_handler(pdev->txrx_pdev,
1414 					      num_msdus, msg_word + 1);
1415 			break;
1416 		}
1417 		case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND:
1418 		{
1419 			u_int16_t peer_id;
1420 			u_int8_t tid;
1421 			u_int8_t offload_ind, frag_ind;
1422 
1423 			if (qdf_unlikely(
1424 				  !pdev->cfg.is_full_reorder_offload)) {
1425 				qdf_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND not supported when full reorder offload is disabled\n");
1426 				break;
1427 			}
1428 
1429 			if (qdf_unlikely(
1430 				pdev->txrx_pdev->cfg.is_high_latency)) {
1431 				qdf_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND not supported on high latency\n");
1432 				break;
1433 			}
1434 
1435 			peer_id = HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(
1436 							*msg_word);
1437 			tid = HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(
1438 							*msg_word);
1439 			offload_ind =
1440 				HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(
1441 							*msg_word);
1442 			frag_ind = HTT_RX_IN_ORD_PADDR_IND_FRAG_GET(
1443 							*msg_word);
1444 
1445 			if (qdf_unlikely(frag_ind)) {
1446 				ol_rx_frag_indication_handler(
1447 				pdev->txrx_pdev, htt_t2h_msg, peer_id,
1448 				tid);
1449 				break;
1450 			}
1451 
1452 			ol_rx_in_order_indication_handler(
1453 					pdev->txrx_pdev, htt_t2h_msg,
1454 					peer_id, tid, offload_ind);
1455 			break;
1456 		}
1457 		default:
1458 			htt_t2h_lp_msg_handler(context, htt_t2h_msg, false);
1459 			break;
1460 		};
1461 
1462 		/* Re-initialize the indication buffer */
1463 		HTT_T2H_MSG_BUF_REINIT(htt_t2h_msg, pdev->osdev);
1464 		qdf_nbuf_set_pktlen(htt_t2h_msg, 0);
1465 	}
1466 }
1467 #endif /* WLAN_FEATURE_FASTPATH */
1468 
1469 /*--- target->host HTT message Info Element access methods ------------------*/
1470 
1471 /*--- tx completion message ---*/
1472 
1473 uint16_t htt_tx_compl_desc_id(void *iterator, int num)
1474 {
1475 	/*
1476 	 * The MSDU IDs are packed, 2 per 32-bit word.
1477 	 * Iterate on them as an array of 16-bit elements.
1478 	 * This will work fine if the host endianness matches
1479 	 * the target endianness.
1480 	 * If the host endianness is opposite of the target's,
1481 	 * this iterator will produce descriptor IDs in a different
1482 	 * order than the target inserted them into the message -
1483 	 * if the target puts in [0, 1, 2, 3, ...] the host will
1484 	 * put out [1, 0, 3, 2, ...].
1485 	 * This is fine, except for the last ID if there are an
1486 	 * odd number of IDs.  But the TX_COMPL_IND handling code
1487 	 * in the htt_t2h_msg_handler already added a duplicate
1488 	 * of the final ID, if there were an odd number of IDs,
1489 	 * so this function can safely treat the IDs as an array
1490 	 * of 16-bit elements.
1491 	 */
1492 	return *(((uint16_t *) iterator) + num);
1493 }
1494 
1495 /*--- rx indication message ---*/
1496 
1497 int htt_rx_ind_flush(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
1498 {
1499 	uint32_t *msg_word;
1500 
1501 	msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
1502 	return HTT_RX_IND_FLUSH_VALID_GET(*msg_word);
1503 }
1504 
1505 void
1506 htt_rx_ind_flush_seq_num_range(htt_pdev_handle pdev,
1507 			       qdf_nbuf_t rx_ind_msg,
1508 			       unsigned int *seq_num_start,
1509 			       unsigned int *seq_num_end)
1510 {
1511 	uint32_t *msg_word;
1512 
1513 	msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
1514 	msg_word++;
1515 	*seq_num_start = HTT_RX_IND_FLUSH_SEQ_NUM_START_GET(*msg_word);
1516 	*seq_num_end = HTT_RX_IND_FLUSH_SEQ_NUM_END_GET(*msg_word);
1517 }
1518 
1519 int htt_rx_ind_release(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
1520 {
1521 	uint32_t *msg_word;
1522 
1523 	msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
1524 	return HTT_RX_IND_REL_VALID_GET(*msg_word);
1525 }
1526 
1527 void
1528 htt_rx_ind_release_seq_num_range(htt_pdev_handle pdev,
1529 				 qdf_nbuf_t rx_ind_msg,
1530 				 unsigned int *seq_num_start,
1531 				 unsigned int *seq_num_end)
1532 {
1533 	uint32_t *msg_word;
1534 
1535 	msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
1536 	msg_word++;
1537 	*seq_num_start = HTT_RX_IND_REL_SEQ_NUM_START_GET(*msg_word);
1538 	*seq_num_end = HTT_RX_IND_REL_SEQ_NUM_END_GET(*msg_word);
1539 }
1540 
1541 void
1542 htt_rx_ind_mpdu_range_info(struct htt_pdev_t *pdev,
1543 			   qdf_nbuf_t rx_ind_msg,
1544 			   int mpdu_range_num,
1545 			   enum htt_rx_status *status, int *mpdu_count)
1546 {
1547 	uint32_t *msg_word;
1548 
1549 	msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
1550 	msg_word += pdev->rx_mpdu_range_offset_words + mpdu_range_num;
1551 	*status = HTT_RX_IND_MPDU_STATUS_GET(*msg_word);
1552 	*mpdu_count = HTT_RX_IND_MPDU_COUNT_GET(*msg_word);
1553 }
1554 
1555 /**
1556  * htt_rx_ind_rssi_dbm() - Return the RSSI provided in a rx indication message.
1557  *
1558  * @pdev:       the HTT instance the rx data was received on
1559  * @rx_ind_msg: the netbuf containing the rx indication message
1560  *
1561  * Return the RSSI from an rx indication message, in dBm units.
1562  *
1563  * Return: RSSI in dBm, or HTT_RSSI_INVALID
1564  */
1565 int16_t htt_rx_ind_rssi_dbm(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
1566 {
1567 	int8_t rssi;
1568 	uint32_t *msg_word;
1569 
1570 	msg_word = (uint32_t *)
1571 		   (qdf_nbuf_data(rx_ind_msg) +
1572 		    HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
1573 
1574 	/* check if the RX_IND message contains valid rx PPDU start info */
1575 	if (!HTT_RX_IND_START_VALID_GET(*msg_word))
1576 		return HTT_RSSI_INVALID;
1577 
1578 	rssi = HTT_RX_IND_RSSI_CMB_GET(*msg_word);
1579 	return (HTT_TGT_RSSI_INVALID == rssi) ?
1580 	       HTT_RSSI_INVALID : rssi;
1581 }
1582 
1583 /**
1584  * htt_rx_ind_rssi_dbm_chain() - Return the RSSI for a chain provided in a rx
1585  *              indication message.
1586  * @pdev:       the HTT instance the rx data was received on
1587  * @rx_ind_msg: the netbuf containing the rx indication message
1588  * @chain:      the index of the chain (0-3)
1589  *
1590  * Return the RSSI for a chain from an rx indication message, in dBm units.
1591  *
1592  * Return: RSSI, or HTT_RSSI_INVALID
1593  */
1594 int16_t
1595 htt_rx_ind_rssi_dbm_chain(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg,
1596 		      int8_t chain)
1597 {
1598 	int8_t rssi;
1599 	uint32_t *msg_word;
1600 
1601 	if (chain < 0 || chain > 3)
1602 		return HTT_RSSI_INVALID;
1603 
1604 	msg_word = (uint32_t *)
1605 		(qdf_nbuf_data(rx_ind_msg) +
1606 		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
1607 
1608 	/* check if the RX_IND message contains valid rx PPDU start info */
1609 	if (!HTT_RX_IND_START_VALID_GET(*msg_word))
1610 		return HTT_RSSI_INVALID;
1611 
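	/* per-chain RSSI words follow the first rx PPDU descriptor word */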
1612 	msg_word += 1 + chain;
1613 
1614 	rssi = HTT_RX_IND_RSSI_PRI20_GET(*msg_word);
1615 	return (HTT_TGT_RSSI_INVALID == rssi) ?
1616 		HTT_RSSI_INVALID :
1617 		rssi;
1618 }
1619 
1620 /**
1621  * htt_rx_ind_legacy_rate() - Return the data rate
1622  * @pdev:        the HTT instance the rx data was received on
1623  * @rx_ind_msg:  the netbuf containing the rx indication message
1624  * @legacy_rate: (output) the data rate
1625  *      The legacy_rate parameter's value depends on the
1626  *      legacy_rate_sel value.
1627  *      If legacy_rate_sel is 0:
1628  *              0x8: OFDM 48 Mbps
1629  *              0x9: OFDM 24 Mbps
1630  *              0xA: OFDM 12 Mbps
1631  *              0xB: OFDM 6 Mbps
1632  *              0xC: OFDM 54 Mbps
1633  *              0xD: OFDM 36 Mbps
1634  *              0xE: OFDM 18 Mbps
1635  *              0xF: OFDM 9 Mbps
1636  *      If legacy_rate_sel is 1:
1637  *              0x8: CCK 11 Mbps long preamble
1638  *              0x9: CCK 5.5 Mbps long preamble
1639  *              0xA: CCK 2 Mbps long preamble
1640  *              0xB: CCK 1 Mbps long preamble
1641  *              0xC: CCK 11 Mbps short preamble
1642  *              0xD: CCK 5.5 Mbps short preamble
1643  *              0xE: CCK 2 Mbps short preamble
1644  *      -1 on error.
1645  * @legacy_rate_sel: (output) 0 to indicate OFDM, 1 to indicate CCK.
1646  *      -1 on error.
1647  *
1648  * Return the data rate provided in a rx indication message.
1649  */
1650 void
1651 htt_rx_ind_legacy_rate(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg,
1652 		       uint8_t *legacy_rate, uint8_t *legacy_rate_sel)
1653 {
1654 	uint32_t *msg_word;
1655 
1656 	msg_word = (uint32_t *)
1657 		(qdf_nbuf_data(rx_ind_msg) +
1658 		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
1659 
1660 	/* check if the RX_IND message contains valid rx PPDU start info */
1661 	if (!HTT_RX_IND_START_VALID_GET(*msg_word)) {
1662 		*legacy_rate = -1;
1663 		*legacy_rate_sel = -1;
1664 		return;
1665 	}
1666 
1667 	*legacy_rate = HTT_RX_IND_LEGACY_RATE_GET(*msg_word);
1668 	*legacy_rate_sel = HTT_RX_IND_LEGACY_RATE_SEL_GET(*msg_word);
1669 }
1670 
1671 /**
1672  * htt_rx_ind_timestamp() - Return the timestamp
1673  * @pdev:                  the HTT instance the rx data was received on
1674  * @rx_ind_msg:            the netbuf containing the rx indication message
1675  * @timestamp_microsec:    (output) the timestamp to microsecond resolution.
1676  *                         -1 on error.
1677  * @timestamp_submicrosec: the submicrosecond portion of the
1678  *                         timestamp. -1 on error.
1679  *
1680  * Return the timestamp provided in a rx indication message.
1681  */
1682 void
1683 htt_rx_ind_timestamp(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg,
1684 		     uint32_t *timestamp_microsec,
1685 		     uint8_t *timestamp_submicrosec)
1686 {
1687 	uint32_t *msg_word;
1688 
1689 	msg_word = (uint32_t *)
1690 		(qdf_nbuf_data(rx_ind_msg) +
1691 		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
1692 
1693 	/* check if the RX_IND message contains valid rx PPDU end info */
1694 	if (!HTT_RX_IND_END_VALID_GET(*msg_word)) {
1695 		*timestamp_microsec = -1;
1696 		*timestamp_submicrosec = -1;
1697 		return;
1698 	}
1699 
1700 	*timestamp_microsec = *(msg_word + 6);
1701 	*timestamp_submicrosec =
1702 		HTT_RX_IND_TIMESTAMP_SUBMICROSEC_GET(*msg_word);
1703 }
1704 
1705 #define INVALID_TSF -1
1706 /**
1707  * htt_rx_ind_tsf32() - Return the TSF timestamp
1708  * @pdev:       the HTT instance the rx data was received on
1709  * @rx_ind_msg: the netbuf containing the rx indication message
1710  *
1711  * Return the TSF timestamp provided in a rx indication message.
1712  *
1713  * Return: TSF timestamp
1714  */
1715 uint32_t
1716 htt_rx_ind_tsf32(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
1717 {
1718 	uint32_t *msg_word;
1719 
1720 	msg_word = (uint32_t *)
1721 		(qdf_nbuf_data(rx_ind_msg) +
1722 		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
1723 
1724 	/* check if the RX_IND message contains valid rx PPDU end info */
1725 	if (!HTT_RX_IND_END_VALID_GET(*msg_word))
1726 		return INVALID_TSF;
1727 
1728 	return *(msg_word + 5);
1729 }
1730 
1731 /**
1732  * htt_rx_ind_ext_tid() - Return the extended traffic ID provided in a rx
1733  *			  indication message.
1734  * @pdev:       the HTT instance the rx data was received on
1735  * @rx_ind_msg: the netbuf containing the rx indication message
1736  *
1737  * Return the extended traffic ID in a rx indication message.
1738  *
1739  * Return: Extended TID
1740  */
1741 uint8_t
1742 htt_rx_ind_ext_tid(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
1743 {
1744 	uint32_t *msg_word;
1745 
1746 	msg_word = (uint32_t *)
1747 		(qdf_nbuf_data(rx_ind_msg));
1748 
1749 	return HTT_RX_IND_EXT_TID_GET(*msg_word);
1750 }
1751 
1752 /*--- stats confirmation message ---*/
1753 
1754 void
1755 htt_t2h_dbg_stats_hdr_parse(uint8_t *stats_info_list,
1756 			    enum htt_dbg_stats_type *type,
1757 			    enum htt_dbg_stats_status *status,
1758 			    int *length, uint8_t **stats_data)
1759 {
1760 	uint32_t *msg_word = (uint32_t *) stats_info_list;
1761 	*type = HTT_T2H_STATS_CONF_TLV_TYPE_GET(*msg_word);
1762 	*status = HTT_T2H_STATS_CONF_TLV_STATUS_GET(*msg_word);
1763 	*length = HTT_T2H_STATS_CONF_TLV_HDR_SIZE +     /* header length */
1764 		HTT_T2H_STATS_CONF_TLV_LENGTH_GET(*msg_word); /* data len */
1765 	*stats_data = stats_info_list + HTT_T2H_STATS_CONF_TLV_HDR_SIZE;
1766 }
1767 
1768 void
1769 htt_rx_frag_ind_flush_seq_num_range(htt_pdev_handle pdev,
1770 				    qdf_nbuf_t rx_frag_ind_msg,
1771 				    uint16_t *seq_num_start, uint16_t *seq_num_end)
1772 {
1773 	uint32_t *msg_word;
1774 
1775 	msg_word = (uint32_t *) qdf_nbuf_data(rx_frag_ind_msg);
1776 	msg_word++;
1777 	*seq_num_start = HTT_RX_FRAG_IND_FLUSH_SEQ_NUM_START_GET(*msg_word);
1778 	*seq_num_end = HTT_RX_FRAG_IND_FLUSH_SEQ_NUM_END_GET(*msg_word);
1779 }
1780