1 /*
2  * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  *  DOC: htt_t2h.c
22  *
23  *  Provide functions to process target->host HTT messages.
24  *
25  *  This file contains functions related to target->host HTT messages.
26  *  There are two categories of functions:
27  *  1.  A function that receives a HTT message from HTC, and dispatches it
28  *      based on the HTT message type.
29  *  2.  functions that provide the info elements from specific HTT messages.
30  */
31 #include <wma.h>
32 #include <htc_api.h>            /* HTC_PACKET */
33 #include <htt.h>                /* HTT_T2H_MSG_TYPE, etc. */
34 #include <qdf_nbuf.h>           /* qdf_nbuf_t */
35 
36 #include <ol_rx.h>
37 #include <ol_htt_rx_api.h>
38 #include <ol_htt_tx_api.h>
39 #include <ol_txrx_htt_api.h>    /* htt_tx_status */
40 
41 #include <htt_internal.h>       /* HTT_TX_SCHED, etc. */
42 #include <pktlog_ac_fmt.h>
43 #include <wdi_event.h>
45 #include <ol_txrx_peer_find.h>
46 #include <cdp_txrx_ipa.h>
47 #include "pktlog_ac.h"
48 #include <cdp_txrx_handle.h>
49 #include <wlan_pkt_capture_ucfg_api.h>
50 #include <ol_txrx.h>
51 /*--- target->host HTT message dispatch function ----------------------------*/
52 
53 #ifndef DEBUG_CREDIT
54 #define DEBUG_CREDIT 0
55 #endif
56 
57 #if defined(CONFIG_HL_SUPPORT)
58 
59 
60 
61 /**
62  * htt_rx_frag_set_last_msdu() - set last msdu bit in rx descriptor
63  *				 for received frames
64  * @pdev: Handle (pointer) to HTT pdev.
65  * @msg: htt received msg
66  *
67  * Return: None
68  */
69 static inline
70 void htt_rx_frag_set_last_msdu(struct htt_pdev_t *pdev, qdf_nbuf_t msg)
71 {
72 }
73 #else
74 
75 static void htt_rx_frag_set_last_msdu(struct htt_pdev_t *pdev, qdf_nbuf_t msg)
76 {
77 	uint32_t *msg_word;
78 	unsigned int num_msdu_bytes;
79 	qdf_nbuf_t msdu;
80 	struct htt_host_rx_desc_base *rx_desc;
81 	int start_idx;
82 	uint8_t *p_fw_msdu_rx_desc = 0;
83 
84 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
85 	num_msdu_bytes = HTT_RX_FRAG_IND_FW_RX_DESC_BYTES_GET(
86 		*(msg_word + HTT_RX_FRAG_IND_HDR_PREFIX_SIZE32));
87 	/*
88 	 * 1 word for the message header,
89 	 * 1 word to specify the number of MSDU bytes,
90 	 * 1 word for every 4 MSDU bytes (round up),
91 	 * 1 word for the MPDU range header
92 	 */
93 	pdev->rx_mpdu_range_offset_words = 3 + ((num_msdu_bytes + 3) >> 2);
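	/*
	 * Worked example (illustrative values): if the target reports 6 FW
	 * rx descriptor bytes, the MPDU range header starts at word
	 * 3 + ((6 + 3) >> 2) = 5 of the fragment indication message.
	 */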
94 	pdev->rx_ind_msdu_byte_idx = 0;
95 
96 	p_fw_msdu_rx_desc = ((uint8_t *) (msg_word) +
97 			     HTT_ENDIAN_BYTE_IDX_SWAP
98 				     (HTT_RX_FRAG_IND_FW_DESC_BYTE_OFFSET));
99 
100 	/*
101 	 * Fix for EV126710, in which BSOD occurs due to last_msdu bit
102 	 * not set while the next pointer is deliberately set to NULL
103 	 * before calling ol_rx_pn_check_base()
104 	 *
105 	 * For fragment frames, the HW may not have set the last_msdu bit
106 	 * in the rx descriptor, but the SW expects this flag to be set,
107 	 * since each fragment is in a separate MPDU. Thus, set the flag here,
108 	 * just in case the HW didn't.
109 	 */
110 	start_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
111 	msdu = pdev->rx_ring.buf.netbufs_ring[start_idx];
112 	qdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
113 	qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
114 	rx_desc = htt_rx_desc(msdu);
115 	*((uint8_t *) &rx_desc->fw_desc.u.val) = *p_fw_msdu_rx_desc;
116 	rx_desc->msdu_end.last_msdu = 1;
117 	qdf_nbuf_map(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
118 }
119 #endif
120 
121 static uint8_t *htt_t2h_mac_addr_deswizzle(uint8_t *tgt_mac_addr,
122 					   uint8_t *buffer)
123 {
124 #ifdef BIG_ENDIAN_HOST
125 	/*
126 	 * The host endianness is opposite of the target endianness.
127 	 * To make uint32_t elements come out correctly, the target->host
128 	 * upload has swizzled the bytes in each uint32_t element of the
129 	 * message.
130 	 * For byte-array message fields like the MAC address, this
131 	 * upload swizzling puts the bytes in the wrong order, and needs
132 	 * to be undone.
133 	 */
134 	buffer[0] = tgt_mac_addr[3];
135 	buffer[1] = tgt_mac_addr[2];
136 	buffer[2] = tgt_mac_addr[1];
137 	buffer[3] = tgt_mac_addr[0];
138 	buffer[4] = tgt_mac_addr[7];
139 	buffer[5] = tgt_mac_addr[6];
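	/*
	 * Illustration (hypothetical address): a target MAC of
	 * aa:bb:cc:dd:ee:ff spans two uint32_t words and, after the per-word
	 * byte swap, arrives as dd cc bb aa xx xx ff ee; the copies above
	 * restore the original aa:bb:cc:dd:ee:ff ordering.
	 */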
140 	return buffer;
141 #else
142 	/*
143 	 * The host endianness matches the target endianness -
144 	 * we can use the mac addr directly from the message buffer.
145 	 */
146 	return tgt_mac_addr;
147 #endif
148 }
149 
150 /**
151  * htt_ipa_op_response() - handle an IPA op response message from the FW
152  * @pdev: Handle (pointer) to HTT pdev.
153  * @msg_word: htt msg
154  *
155  * Return: None
156  */
157 #ifdef IPA_OFFLOAD
158 static void htt_ipa_op_response(struct htt_pdev_t *pdev, uint32_t *msg_word)
159 {
160 	uint8_t op_code;
161 	uint16_t len;
162 	uint8_t *op_msg_buffer;
163 	uint8_t *msg_start_ptr;
164 
165 	htc_pm_runtime_put(pdev->htc_pdev);
166 	msg_start_ptr = (uint8_t *) msg_word;
167 	op_code =
168 		HTT_WDI_IPA_OP_RESPONSE_OP_CODE_GET(*msg_word);
169 	msg_word++;
170 	len = HTT_WDI_IPA_OP_RESPONSE_RSP_LEN_GET(*msg_word);
171 
172 	op_msg_buffer =
173 		qdf_mem_malloc(sizeof
174 				(struct htt_wdi_ipa_op_response_t) +
175 				len);
176 	if (!op_msg_buffer)
177 		return;
178 
179 	qdf_mem_copy(op_msg_buffer,
180 			msg_start_ptr,
181 			sizeof(struct htt_wdi_ipa_op_response_t) +
182 			len);
183 	cdp_ipa_op_response(cds_get_context(QDF_MODULE_ID_SOC),
184 			    OL_TXRX_PDEV_ID, op_msg_buffer);
185 }
186 #else
187 static void htt_ipa_op_response(struct htt_pdev_t *pdev, uint32_t *msg_word)
188 {
189 }
190 #endif
191 
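/**
 * htt_t2h_adjust_bus_target_delta() - adjust the credit delta reported by
 *				       the target
 * @pdev: Handle (pointer) to HTT pdev.
 * @htt_credit_delta: credit delta reported by the target
 *
 * For high-latency targets that do not request tx completions by default,
 * fold the reported delta into the HTT tx credit accounting and return the
 * delta that should be applied to the host credit count; otherwise the
 * delta is returned unchanged.
 *
 * Return: adjusted credit delta
 */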
192 #ifndef QCN7605_SUPPORT
193 static int htt_t2h_adjust_bus_target_delta(struct htt_pdev_t *pdev,
194 					   int32_t htt_credit_delta)
195 {
196 	if (pdev->cfg.is_high_latency && !pdev->cfg.default_tx_comp_req) {
197 		HTT_TX_MUTEX_ACQUIRE(&pdev->credit_mutex);
198 		qdf_atomic_add(htt_credit_delta,
199 			       &pdev->htt_tx_credit.target_delta);
200 		htt_credit_delta = htt_tx_credit_update(pdev);
201 		HTT_TX_MUTEX_RELEASE(&pdev->credit_mutex);
202 	}
203 	return htt_credit_delta;
204 }
205 #else
206 static int htt_t2h_adjust_bus_target_delta(struct htt_pdev_t *pdev,
207 					   int32_t htt_credit_delta)
208 {
209 	return htt_credit_delta;
210 }
211 #endif
212 
213 #define MAX_TARGET_TX_CREDIT    204800
214 #define HTT_CFR_DUMP_COMPL_HEAD_SZ	4
215 
216 /* Target to host msg/event handler for low priority messages */
217 static void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
218 				   bool free_msg_buf)
219 {
220 	struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
221 	uint32_t *msg_word;
222 	enum htt_t2h_msg_type msg_type;
223 
224 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
225 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
226 	switch (msg_type) {
227 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
228 	{
229 		if (htc_dec_return_htt_runtime_cnt(pdev->htc_pdev) >= 0)
230 			htc_pm_runtime_put(pdev->htc_pdev);
231 
232 		pdev->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
233 		pdev->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
234 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
235 			  "target uses HTT version %d.%d; host uses %d.%d",
236 			  pdev->tgt_ver.major, pdev->tgt_ver.minor,
237 			  HTT_CURRENT_VERSION_MAJOR,
238 			  HTT_CURRENT_VERSION_MINOR);
239 		if (pdev->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR)
240 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_WARN,
241 				  "*** Incompatible host/target HTT versions!");
242 		/* abort if the target is incompatible with the host */
243 		qdf_assert(pdev->tgt_ver.major ==
244 			   HTT_CURRENT_VERSION_MAJOR);
245 		if (pdev->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
246 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
247 				  "*** Warning: host/target HTT versions are ");
248 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
249 				  "different, though compatible!");
250 		}
251 		break;
252 	}
253 	case HTT_T2H_MSG_TYPE_RX_FLUSH:
254 	{
255 		uint16_t peer_id;
256 		uint8_t tid;
257 		uint16_t seq_num_start, seq_num_end;
258 		enum htt_rx_flush_action action;
259 
260 		if (qdf_nbuf_len(htt_t2h_msg) < HTT_RX_FLUSH_BYTES) {
261 			qdf_print("invalid nbuff len");
262 			WARN_ON(1);
263 			break;
264 		}
265 
266 		peer_id = HTT_RX_FLUSH_PEER_ID_GET(*msg_word);
267 		tid = HTT_RX_FLUSH_TID_GET(*msg_word);
268 		seq_num_start =
269 			HTT_RX_FLUSH_SEQ_NUM_START_GET(*(msg_word + 1));
270 		seq_num_end =
271 			HTT_RX_FLUSH_SEQ_NUM_END_GET(*(msg_word + 1));
272 		action =
273 			HTT_RX_FLUSH_MPDU_STATUS_GET(*(msg_word + 1)) ==
274 			1 ? htt_rx_flush_release : htt_rx_flush_discard;
275 		ol_rx_flush_handler(pdev->txrx_pdev, peer_id, tid,
276 				    seq_num_start, seq_num_end, action);
277 		break;
278 	}
279 	case HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND:
280 	{
281 		uint16_t msdu_cnt;
282 
283 		if (!pdev->cfg.is_high_latency &&
284 		    pdev->cfg.is_full_reorder_offload) {
285 			qdf_print("HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND not supported when full reorder offload is enabled in the configuration");
288 			break;
289 		}
290 		msdu_cnt =
291 			HTT_RX_OFFLOAD_DELIVER_IND_MSDU_CNT_GET(*msg_word);
292 		ol_rx_offload_deliver_ind_handler(pdev->txrx_pdev,
293 						  htt_t2h_msg,
294 						  msdu_cnt);
295 		if (pdev->cfg.is_high_latency) {
296 			/*
297 			 * return here for HL to avoid double free on
298 			 * htt_t2h_msg
299 			 */
300 			return;
301 		}
302 		break;
303 	}
304 	case HTT_T2H_MSG_TYPE_RX_FRAG_IND:
305 	{
306 		uint16_t peer_id;
307 		uint8_t tid;
308 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
309 
310 		if (msg_len < HTT_RX_FRAG_IND_BYTES) {
311 			qdf_print("invalid nbuff len");
312 			WARN_ON(1);
313 			break;
314 		}
315 		peer_id = HTT_RX_FRAG_IND_PEER_ID_GET(*msg_word);
316 		tid = HTT_RX_FRAG_IND_EXT_TID_GET(*msg_word);
317 		htt_rx_frag_set_last_msdu(pdev, htt_t2h_msg);
318 
319 		/* If the packet length is invalid, this frame will be discarded. */
320 		if (pdev->cfg.is_high_latency) {
321 			u_int32_t rx_pkt_len = 0;
322 
323 			rx_pkt_len = qdf_nbuf_len(htt_t2h_msg);
324 
325 			if (rx_pkt_len < (HTT_RX_FRAG_IND_BYTES +
326 				sizeof(struct hl_htt_rx_ind_base)+
327 				sizeof(struct ieee80211_frame))) {
328 
329 				qdf_print("invalid packet len, %u", rx_pkt_len);
330 				/*
331 				 * This buf will be freed before
332 				 * exiting this function.
333 				 */
334 				break;
335 			}
336 		}
337 
338 		ol_rx_frag_indication_handler(pdev->txrx_pdev,
339 					      htt_t2h_msg,
340 					      peer_id, tid);
341 
342 		if (pdev->cfg.is_high_latency) {
343 			/*
344 			 * For the high latency solution, the
345 			 * HTT_T2H_MSG_TYPE_RX_FRAG_IND message and the RX
346 			 * packet share the same buffer. The buffer will be
347 			 * freed by ol_rx_frag_indication_handler or the upper
348 			 * layer to avoid a double free.
349 			 */
351 			return;
352 		}
353 
354 		break;
355 	}
356 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
357 	{
358 		uint16_t peer_id;
359 		uint8_t tid;
360 		uint8_t win_sz;
361 		uint16_t start_seq_num;
362 
363 		/*
364 		 * FOR NOW, the host doesn't need to know the initial
365 		 * sequence number for rx aggregation.
366 		 * Thus, any value will do - specify 0.
367 		 */
368 		start_seq_num = 0;
369 		peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
370 		tid = HTT_RX_ADDBA_TID_GET(*msg_word);
371 		win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
372 		ol_rx_addba_handler(pdev->txrx_pdev, peer_id, tid,
373 				    win_sz, start_seq_num,
374 				    0 /* success */);
375 		break;
376 	}
377 	case HTT_T2H_MSG_TYPE_RX_DELBA:
378 	{
379 		uint16_t peer_id;
380 		uint8_t tid;
381 
382 		peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
383 		tid = HTT_RX_DELBA_TID_GET(*msg_word);
384 		ol_rx_delba_handler(pdev->txrx_pdev, peer_id, tid);
385 		break;
386 	}
387 	case HTT_T2H_MSG_TYPE_PEER_MAP:
388 	{
389 		uint8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
390 		uint8_t *peer_mac_addr;
391 		uint16_t peer_id;
392 		uint8_t vdev_id;
393 
394 		if (qdf_nbuf_len(htt_t2h_msg) < HTT_RX_PEER_MAP_BYTES) {
395 			qdf_print("invalid nbuff len");
396 			WARN_ON(1);
397 			break;
398 		}
399 
400 		peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
401 		vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
402 		peer_mac_addr = htt_t2h_mac_addr_deswizzle(
403 			(uint8_t *) (msg_word + 1),
404 			&mac_addr_deswizzle_buf[0]);
405 
406 		if (peer_id > ol_cfg_max_peer_id(pdev->ctrl_pdev)) {
407 			qdf_print("%s: HTT_T2H_MSG_TYPE_PEER_MAP,"
408 				"invalid peer_id, %u\n",
409 				__FUNCTION__,
410 				peer_id);
411 			break;
412 		}
413 
414 		ol_rx_peer_map_handler(pdev->txrx_pdev, peer_id,
415 				       vdev_id, peer_mac_addr,
416 				       1 /*can tx */);
417 		break;
418 	}
419 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
420 	{
421 		uint16_t peer_id;
422 
423 		if (qdf_nbuf_len(htt_t2h_msg) < HTT_RX_PEER_UNMAP_BYTES) {
424 			qdf_print("invalid nbuff len");
425 			WARN_ON(1);
426 			break;
427 		}
428 
429 		peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
430 		if (peer_id > ol_cfg_max_peer_id(pdev->ctrl_pdev)) {
431 			qdf_print("%s: HTT_T2H_MSG_TYPE_PEER_UNMAP,"
432 				"invalid peer_id, %u\n",
433 				__FUNCTION__,
434 				peer_id);
435 			break;
436 		}
437 
438 		ol_rx_peer_unmap_handler(pdev->txrx_pdev, peer_id);
439 		break;
440 	}
441 	case HTT_T2H_MSG_TYPE_SEC_IND:
442 	{
443 		uint16_t peer_id;
444 		enum htt_sec_type sec_type;
445 		int is_unicast;
446 
447 		if (qdf_nbuf_len(htt_t2h_msg) < HTT_SEC_IND_BYTES) {
448 			qdf_print("invalid nbuff len");
449 			WARN_ON(1);
450 			break;
451 		}
452 
453 		peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
454 		sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
455 		is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
456 		msg_word++;   /* point to the first part of the Michael key */
457 		ol_rx_sec_ind_handler(pdev->txrx_pdev, peer_id,
458 				      sec_type, is_unicast, msg_word,
459 				      msg_word + 2);
460 		break;
461 	}
462 	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND:
463 	{
464 		struct htt_mgmt_tx_compl_ind *compl_msg;
465 		int32_t credit_delta = 1;
466 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
467 		if (msg_len < (sizeof(struct htt_mgmt_tx_compl_ind) + sizeof(*msg_word))) {
468 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
469 				  "Invalid msg_word length in HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND");
470 			WARN_ON(1);
471 			break;
472 		}
473 
474 		compl_msg =
475 			(struct htt_mgmt_tx_compl_ind *)(msg_word + 1);
476 
477 		if (pdev->cfg.is_high_latency) {
478 			if (!pdev->cfg.default_tx_comp_req) {
479 				HTT_TX_MUTEX_ACQUIRE(&pdev->credit_mutex);
480 				qdf_atomic_add(credit_delta,
481 					       &pdev->htt_tx_credit.
482 								target_delta);
483 				credit_delta = htt_tx_credit_update(pdev);
484 				HTT_TX_MUTEX_RELEASE(&pdev->credit_mutex);
485 			}
486 			if (credit_delta)
487 				ol_tx_target_credit_update(
488 						pdev->txrx_pdev, credit_delta);
489 		}
490 		ol_tx_desc_update_group_credit(
491 			pdev->txrx_pdev, compl_msg->desc_id, 1,
492 			0, compl_msg->status);
493 
494 		DPTRACE(qdf_dp_trace_credit_record(QDF_TX_COMP, QDF_CREDIT_INC,
495 			1, qdf_atomic_read(&pdev->txrx_pdev->target_tx_credit),
496 			qdf_atomic_read(&pdev->txrx_pdev->txq_grps[0].credit),
497 			qdf_atomic_read(&pdev->txrx_pdev->txq_grps[1].credit)));
498 
499 		if (!ol_tx_get_is_mgmt_over_wmi_enabled()) {
500 			ol_tx_single_completion_handler(pdev->txrx_pdev,
501 							compl_msg->status,
502 							compl_msg->desc_id);
503 			htc_pm_runtime_put(pdev->htc_pdev);
504 			HTT_TX_SCHED(pdev);
505 		} else {
506 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
507 				  "Ignoring HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND indication");
508 		}
509 		break;
510 	}
511 	case HTT_T2H_MSG_TYPE_STATS_CONF:
512 	{
513 		uint8_t cookie;
514 		uint8_t *stats_info_list;
515 
516 		cookie = *(msg_word + 1);
517 
518 		stats_info_list = (uint8_t *) (msg_word + 3);
519 		htc_pm_runtime_put(pdev->htc_pdev);
520 		ol_txrx_fw_stats_handler(pdev->txrx_pdev, cookie,
521 					 stats_info_list);
522 		break;
523 	}
524 #ifndef REMOVE_PKT_LOG
525 	case HTT_T2H_MSG_TYPE_PKTLOG:
526 	{
527 		uint32_t len = qdf_nbuf_len(htt_t2h_msg);
528 
529 		if (len < sizeof(*msg_word) + sizeof(uint32_t)) {
530 			qdf_print("invalid nbuff len");
531 			WARN_ON(1);
532 			break;
533 		}
534 
535 		/*len is reduced by sizeof(*msg_word)*/
536 		pktlog_process_fw_msg(OL_TXRX_PDEV_ID, msg_word + 1,
537 				      len - sizeof(*msg_word));
538 		break;
539 	}
540 #endif
541 	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
542 	{
543 		uint32_t htt_credit_delta_abs;
544 		int32_t htt_credit_delta;
545 		int sign, old_credit;
546 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
547 
548 		if (msg_len < HTT_TX_CREDIT_MSG_BYTES) {
549 			qdf_print("invalid nbuff len");
550 			WARN_ON(1);
551 			break;
552 		}
553 
554 		htt_credit_delta_abs =
555 			HTT_TX_CREDIT_DELTA_ABS_GET(*msg_word);
556 		sign = HTT_TX_CREDIT_SIGN_BIT_GET(*msg_word) ? -1 : 1;
557 		htt_credit_delta = sign * htt_credit_delta_abs;
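		/*
		 * For example, a delta_abs of 5 with the sign bit set yields
		 * an htt_credit_delta of -5.
		 */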
558 
559 		old_credit = qdf_atomic_read(&pdev->htt_tx_credit.target_delta);
560 		if (((old_credit + htt_credit_delta) > MAX_TARGET_TX_CREDIT) ||
561 			((old_credit + htt_credit_delta) < -MAX_TARGET_TX_CREDIT)) {
562 			qdf_err("invalid update,old_credit=%d, htt_credit_delta=%d",
563 				old_credit, htt_credit_delta);
564 			break;
565 		}
566 		htt_credit_delta =
567 		htt_t2h_adjust_bus_target_delta(pdev, htt_credit_delta);
568 		htt_tx_group_credit_process(pdev, msg_word);
569 		DPTRACE(qdf_dp_trace_credit_record(QDF_TX_CREDIT_UPDATE,
570 			QDF_CREDIT_INC,	htt_credit_delta,
571 			qdf_atomic_read(&pdev->txrx_pdev->target_tx_credit) +
572 			htt_credit_delta,
573 			qdf_atomic_read(&pdev->txrx_pdev->txq_grps[0].credit),
574 			qdf_atomic_read(&pdev->txrx_pdev->txq_grps[1].credit)));
575 
576 		ol_tx_credit_completion_handler(pdev->txrx_pdev,
577 						htt_credit_delta);
578 		break;
579 	}
580 
581 	case HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE:
582 	{
583 		uint16_t len;
584 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
585 		len = HTT_WDI_IPA_OP_RESPONSE_RSP_LEN_GET(*(msg_word + 1));
586 
587 		if (sizeof(struct htt_wdi_ipa_op_response_t) + len > msg_len) {
588 			qdf_print("Invalid buf len size %zu len %d, msg_len %d",
589 				  sizeof(struct htt_wdi_ipa_op_response_t),
590 				  len, msg_len);
591 			WARN_ON(1);
592 			break;
593 		}
594 		htt_ipa_op_response(pdev, msg_word);
595 		break;
596 	}
597 
598 	case HTT_T2H_MSG_TYPE_FLOW_POOL_MAP:
599 	{
600 		uint8_t num_flows;
601 		struct htt_flow_pool_map_payload_t *pool_map_payload;
602 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
603 
604 		num_flows = HTT_FLOW_POOL_MAP_NUM_FLOWS_GET(*msg_word);
605 
606 		if (((HTT_FLOW_POOL_MAP_PAYLOAD_SZ /
607 			HTT_FLOW_POOL_MAP_HEADER_SZ) * num_flows + 1) * sizeof(*msg_word) > msg_len) {
608 			qdf_print("Invalid num_flows");
609 			WARN_ON(1);
610 			break;
611 		}
612 
613 		msg_word++;
614 		while (num_flows) {
615 			pool_map_payload = (struct htt_flow_pool_map_payload_t *)
616 								msg_word;
617 			ol_tx_flow_pool_map_handler(pool_map_payload->flow_id,
618 					pool_map_payload->flow_type,
619 					pool_map_payload->flow_pool_id,
620 					pool_map_payload->flow_pool_size);
621 
622 			msg_word += (HTT_FLOW_POOL_MAP_PAYLOAD_SZ /
623 						 HTT_FLOW_POOL_MAP_HEADER_SZ);
624 			num_flows--;
625 		}
626 		break;
627 	}
628 
629 	case HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP:
630 	{
631 		struct htt_flow_pool_unmap_t *pool_unmap_payload;
632 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
633 
634 		if (msg_len < sizeof(struct htt_flow_pool_unmap_t)) {
635 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
636 				  "Invalid msg_word length in HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP");
637 			WARN_ON(1);
638 			break;
639 		}
640 
641 		pool_unmap_payload = (struct htt_flow_pool_unmap_t *)msg_word;
642 		ol_tx_flow_pool_unmap_handler(pool_unmap_payload->flow_id,
643 					pool_unmap_payload->flow_type,
644 					pool_unmap_payload->flow_pool_id);
645 		break;
646 	}
647 
648 	case HTT_T2H_MSG_TYPE_FLOW_POOL_RESIZE:
649 	{
650 		struct htt_flow_pool_resize_t *msg;
651 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
652 
653 		if (msg_len < sizeof(struct htt_flow_pool_resize_t)) {
654 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
655 				  "Invalid msg_word length in HTT_T2H_MSG_TYPE_FLOW_POOL_RESIZE");
656 			WARN_ON(1);
657 			break;
658 		}
659 
660 		msg = (struct htt_flow_pool_resize_t *)msg_word;
661 		ol_tx_flow_pool_resize_handler(msg->flow_pool_id,
662 					       msg->flow_pool_new_size);
663 
664 		break;
665 	}
666 
667 	case HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR:
668 	{
669 		switch (HTT_RX_OFLD_PKT_ERR_MSG_SUB_TYPE_GET(*msg_word)) {
670 		case HTT_RX_OFLD_PKT_ERR_TYPE_MIC_ERR:
671 		{
672 			struct ol_txrx_vdev_t *vdev;
673 			struct ol_txrx_peer_t *peer;
674 			uint64_t pn;
675 			uint32_t key_id;
676 			uint16_t peer_id;
677 			int msg_len = qdf_nbuf_len(htt_t2h_msg);
678 
679 			if (msg_len < HTT_RX_OFLD_PKT_ERR_MIC_ERR_BYTES) {
680 				qdf_print("invalid nbuff len");
681 				WARN_ON(1);
682 				break;
683 			}
684 
685 			peer_id = HTT_RX_OFLD_PKT_ERR_MIC_ERR_PEER_ID_GET
686 				(*(msg_word + 1));
687 
688 			peer = ol_txrx_peer_find_by_id(pdev->txrx_pdev,
689 						       peer_id);
690 			if (!peer) {
691 				qdf_print("invalid peer id %d", peer_id);
692 				qdf_assert(0);
693 				break;
694 			}
695 			vdev = peer->vdev;
696 			key_id = HTT_RX_OFLD_PKT_ERR_MIC_ERR_KEYID_GET
697 				(*(msg_word + 1));
698 			qdf_mem_copy(&pn, (uint8_t *)(msg_word + 6), 6);
699 
700 			ol_rx_send_mic_err_ind(vdev->pdev, vdev->vdev_id,
701 					       peer->mac_addr.raw, 0, 0,
702 					       OL_RX_ERR_TKIP_MIC, htt_t2h_msg,
703 					       &pn, key_id);
704 			break;
705 		}
706 		default:
707 		{
708 			qdf_print("unhandled error type %d",
709 			  HTT_RX_OFLD_PKT_ERR_MSG_SUB_TYPE_GET(*msg_word));
710 			break;
711 		}
712 		}
713 		break;
714 	}
715 #ifdef WLAN_CFR_ENABLE
716 	case HTT_T2H_MSG_TYPE_CFR_DUMP_COMPL_IND:
717 	{
718 		int expected_len;
719 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
720 
721 		expected_len = HTT_CFR_DUMP_COMPL_HEAD_SZ +
722 				sizeof(struct htt_cfr_dump_compl_ind);
723 		if (msg_len < expected_len) {
724 			qdf_print("Invalid length of CFR capture event");
725 			break;
726 		}
727 
728 		ol_rx_cfr_capture_msg_handler(htt_t2h_msg);
729 		break;
730 	}
731 #endif
732 	default:
733 		break;
734 	};
735 	/* Free the indication buffer */
736 	if (free_msg_buf)
737 		qdf_nbuf_free(htt_t2h_msg);
738 }
739 
740 #define HTT_TX_COMPL_HEAD_SZ			4
741 #define HTT_TX_COMPL_BYTES_PER_MSDU_ID		2
742 
743 /*
744  * Generic target to host msg/event handler for low priority messages.
745  * Low priority messages are handled by a separate handler called from
746  * this function, so that the most likely success paths (Rx and Tx
747  * completion) have a small code footprint.
748  */
749 void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
750 {
751 	struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
752 	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
753 	uint32_t *msg_word;
754 	enum htt_t2h_msg_type msg_type;
755 
756 	/* check for successful message reception */
757 	if (pkt->Status != QDF_STATUS_SUCCESS) {
758 		if (pkt->Status != QDF_STATUS_E_CANCELED)
759 			pdev->stats.htc_err_cnt++;
760 		qdf_nbuf_free(htt_t2h_msg);
761 		return;
762 	}
763 #ifdef HTT_RX_RESTORE
764 	if (qdf_unlikely(pdev->rx_ring.rx_reset)) {
765 		qdf_print("rx restore ..\n");
766 		qdf_nbuf_free(htt_t2h_msg);
767 		return;
768 	}
769 #endif
770 
771 	/* confirm alignment */
772 	HTT_ASSERT3((((unsigned long)qdf_nbuf_data(htt_t2h_msg)) & 0x3) == 0);
773 
774 	msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
775 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
776 
777 #if defined(HELIUMPLUS_DEBUG)
778 	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
779 		  "%s %d: msg_word 0x%x msg_type %d", __func__, __LINE__,
780 		  *msg_word, msg_type);
781 #endif
782 
783 	switch (msg_type) {
784 	case HTT_T2H_MSG_TYPE_RX_IND:
785 	{
786 		unsigned int num_mpdu_ranges;
787 		unsigned int num_msdu_bytes;
788 		unsigned int calculated_msg_len;
789 		unsigned int rx_mpdu_range_offset_bytes;
790 		uint16_t peer_id;
791 		uint8_t tid;
792 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
793 
794 		if (qdf_unlikely(pdev->cfg.is_full_reorder_offload)) {
795 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND not supported with full reorder offload");
797 			break;
798 		}
799 		peer_id = HTT_RX_IND_PEER_ID_GET(*msg_word);
800 		tid = HTT_RX_IND_EXT_TID_GET(*msg_word);
801 
802 		if (tid >= OL_TXRX_NUM_EXT_TIDS) {
803 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid tid %d\n",
804 				tid);
805 			break;
806 		}
807 		if (msg_len < (2 + HTT_RX_PPDU_DESC_SIZE32 + 1) * sizeof(uint32_t)) {
808 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid msg_len\n");
809 			break;
810 		}
811 		num_msdu_bytes =
812 			HTT_RX_IND_FW_RX_DESC_BYTES_GET(
813 				*(msg_word + 2 + HTT_RX_PPDU_DESC_SIZE32));
814 		/*
815 		 * 1 word for the message header,
816 		 * HTT_RX_PPDU_DESC_SIZE32 words for the FW rx PPDU desc
817 		 * 1 word to specify the number of MSDU bytes,
818 		 * 1 word for every 4 MSDU bytes (round up),
819 		 * 1 word for the MPDU range header
820 		 */
821 		rx_mpdu_range_offset_bytes =
822 			(HTT_RX_IND_HDR_BYTES + num_msdu_bytes + 3);
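		/*
		 * The "+ 3" rounds the MSDU byte count up to a whole 4-byte
		 * word when the offset is later converted to words (">> 2").
		 */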
823 		if (qdf_unlikely(num_msdu_bytes >
824 				 rx_mpdu_range_offset_bytes)) {
825 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid %s %u\n",
826 				  "num_msdu_bytes",
827 				  num_msdu_bytes);
828 			WARN_ON(1);
829 			break;
830 		}
831 		pdev->rx_mpdu_range_offset_words =
832 			rx_mpdu_range_offset_bytes >> 2;
833 		num_mpdu_ranges =
834 			HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word + 1));
835 		pdev->rx_ind_msdu_byte_idx = 0;
836 		if (qdf_unlikely(rx_mpdu_range_offset_bytes >
837 		    msg_len)) {
838 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid %s %d\n",
839 				  "rx_mpdu_range_offset_words",
840 				  pdev->rx_mpdu_range_offset_words);
841 			WARN_ON(1);
842 			break;
843 		}
844 		calculated_msg_len = rx_mpdu_range_offset_bytes +
845 			(num_mpdu_ranges * (int)sizeof(uint32_t));
846 		/*
847 		 * Check that the addition and multiplication
848 		 * do not cause integer overflow
849 		 */
850 		if (qdf_unlikely(calculated_msg_len <
851 		    rx_mpdu_range_offset_bytes)) {
852 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid %s %u\n",
853 				  "num_mpdu_ranges",
854 				  (num_mpdu_ranges * (int)sizeof(uint32_t)));
855 			WARN_ON(1);
856 			break;
857 		}
858 		if (qdf_unlikely(calculated_msg_len > msg_len)) {
859 			qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid %s %u\n",
860 				  "offset_words + mpdu_ranges",
861 				  calculated_msg_len);
862 			WARN_ON(1);
863 			break;
864 		}
865 		ol_rx_indication_handler(pdev->txrx_pdev,
866 					 htt_t2h_msg, peer_id,
867 					 tid, num_mpdu_ranges);
868 
869 		if (pdev->cfg.is_high_latency)
870 			return;
871 
872 		break;
873 	}
874 	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
875 	{
876 		int old_credit;
877 		int num_msdus;
878 		enum htt_tx_status status;
879 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
880 
881 		/* status - no enum translation needed */
882 		status = HTT_TX_COMPL_IND_STATUS_GET(*msg_word);
883 		num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
884 
885 		/*
886 		 * Each MSDU descriptor ID occupies 2 bytes; the extra 4 bytes
887 		 * account for the HTT message header.
888 		 */
889 		if ((num_msdus * HTT_TX_COMPL_BYTES_PER_MSDU_ID +
890 			HTT_TX_COMPL_HEAD_SZ) > msg_len) {
891 			qdf_print("%s: num_msdus(%d) is invalid,"
892 				"adf_nbuf_len = %d\n",
893 				__FUNCTION__,
894 				num_msdus,
895 				msg_len);
896 			break;
897 		}
898 
899 		if (num_msdus & 0x1) {
900 			struct htt_tx_compl_ind_base *compl =
901 				(void *)msg_word;
902 
903 			/*
904 			 * Host CPU endianness can be different from FW CPU.
905 			 * This can result in even and odd MSDU IDs being
906 			 * switched. If this happens, copy the switched final
907 			 * odd MSDU ID from location payload[size], to
908 			 * location payload[size-1], where the message
909 			 * handler function expects to find it
910 			 */
911 			if (compl->payload[num_msdus] !=
912 			    HTT_TX_COMPL_INV_MSDU_ID) {
913 				compl->payload[num_msdus - 1] =
914 					compl->payload[num_msdus];
915 			}
916 		}
917 
918 		if (pdev->cfg.is_high_latency &&
919 		    !pdev->cfg.credit_update_enabled) {
920 			old_credit = qdf_atomic_read(
921 						&pdev->htt_tx_credit.target_delta);
922 			if (((old_credit + num_msdus) > MAX_TARGET_TX_CREDIT) ||
923 				((old_credit + num_msdus) < -MAX_TARGET_TX_CREDIT)) {
924 				qdf_err("invalid update,old_credit=%d, num_msdus=%d",
925 					old_credit, num_msdus);
926 			} else {
927 				if (!pdev->cfg.default_tx_comp_req) {
928 					int credit_delta;
929 
930 					HTT_TX_MUTEX_ACQUIRE(&pdev->credit_mutex);
931 					qdf_atomic_add(num_msdus,
932 						       &pdev->htt_tx_credit.
933 							target_delta);
934 					credit_delta = htt_tx_credit_update(pdev);
935 					HTT_TX_MUTEX_RELEASE(&pdev->credit_mutex);
936 
937 					if (credit_delta) {
938 						ol_tx_target_credit_update(
939 								pdev->txrx_pdev,
940 								credit_delta);
941 					}
942 				} else {
943 					ol_tx_target_credit_update(pdev->txrx_pdev,
944 								   num_msdus);
945 				}
946 			}
947 		}
948 
949 		ol_tx_completion_handler(pdev->txrx_pdev, num_msdus,
950 					 status, msg_word);
951 		HTT_TX_SCHED(pdev);
952 		break;
953 	}
954 	case HTT_T2H_MSG_TYPE_RX_PN_IND:
955 	{
956 		uint16_t peer_id;
957 		uint8_t tid, pn_ie_cnt, *pn_ie = NULL;
958 		uint16_t seq_num_start, seq_num_end;
959 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
960 
961 		if (msg_len < HTT_RX_PN_IND_BYTES) {
962 			qdf_print("invalid nbuff len");
963 			WARN_ON(1);
964 			break;
965 		}
966 
967 		/*First dword */
968 		peer_id = HTT_RX_PN_IND_PEER_ID_GET(*msg_word);
969 		tid = HTT_RX_PN_IND_EXT_TID_GET(*msg_word);
970 
971 		msg_word++;
972 		/*Second dword */
973 		seq_num_start =
974 			HTT_RX_PN_IND_SEQ_NUM_START_GET(*msg_word);
975 		seq_num_end = HTT_RX_PN_IND_SEQ_NUM_END_GET(*msg_word);
976 		pn_ie_cnt = HTT_RX_PN_IND_PN_IE_CNT_GET(*msg_word);
977 
978 		if (msg_len - HTT_RX_PN_IND_BYTES <
979 		    pn_ie_cnt * sizeof(uint8_t)) {
980 			qdf_print("invalid pn_ie count");
981 			WARN_ON(1);
982 			break;
983 		}
984 
985 		msg_word++;
986 		/*Third dword */
987 		if (pn_ie_cnt)
988 			pn_ie = (uint8_t *) msg_word;
989 
990 		ol_rx_pn_ind_handler(pdev->txrx_pdev, peer_id, tid,
991 				     seq_num_start, seq_num_end,
992 				     pn_ie_cnt, pn_ie);
993 
994 		break;
995 	}
996 	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
997 	{
998 		int num_msdus;
999 		int msg_len = qdf_nbuf_len(htt_t2h_msg);
1000 
1001 		num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
1002 		/*
1003 		 * Each MSDU descriptor ID occupies 2 bytes; the extra 4 bytes
1004 		 * account for the HTT message header.
1005 		 */
1006 		if ((num_msdus * HTT_TX_COMPL_BYTES_PER_MSDU_ID +
1007 			HTT_TX_COMPL_HEAD_SZ) > msg_len) {
1008 			qdf_print("%s: num_msdus(%d) is invalid,"
1009 				"adf_nbuf_len = %d\n",
1010 				__FUNCTION__,
1011 				num_msdus,
1012 				msg_len);
1013 			break;
1014 		}
1015 
1016 		if (num_msdus & 0x1) {
1017 			struct htt_tx_compl_ind_base *compl =
1018 				(void *)msg_word;
1019 
1020 			/*
1021 			 * Host CPU endianness can be different from FW CPU.
1022 			 * This can result in even and odd MSDU IDs being
1023 			 * switched. If this happens, copy the switched final
1024 			 * odd MSDU ID from location payload[size], to
1025 			 * location payload[size-1], where the message handler
1026 			 * function expects to find it
1027 			 */
1028 			if (compl->payload[num_msdus] !=
1029 			    HTT_TX_COMPL_INV_MSDU_ID) {
1030 				compl->payload[num_msdus - 1] =
1031 					compl->payload[num_msdus];
1032 			}
1033 		}
1034 		ol_tx_inspect_handler(pdev->txrx_pdev, num_msdus,
1035 				      msg_word + 1);
1036 		HTT_TX_SCHED(pdev);
1037 		break;
1038 	}
1039 	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND:
1040 	{
1041 		uint16_t peer_id;
1042 		uint8_t tid;
1043 		uint8_t offload_ind, frag_ind;
1044 
1045 		if (qdf_unlikely(!pdev->cfg.is_full_reorder_offload)) {
1046 			qdf_print("full reorder offload is disabled");
1047 			break;
1048 		}
1049 
1050 		if (qdf_unlikely(pdev->cfg.is_high_latency)) {
1051 			qdf_print("full reorder offload not supported in HL");
1052 			break;
1053 		}
1054 
1055 		peer_id = HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(*msg_word);
1056 		tid = HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(*msg_word);
1057 		offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);
1058 		frag_ind = HTT_RX_IN_ORD_PADDR_IND_FRAG_GET(*msg_word);
1059 
1060 #if defined(HELIUMPLUS_DEBUG)
1061 		qdf_print("peerid %d tid %d offloadind %d fragind %d",
1062 			  peer_id, tid, offload_ind,
1063 			  frag_ind);
1064 #endif
1065 		if (qdf_unlikely(frag_ind)) {
1066 			ol_rx_frag_indication_handler(pdev->txrx_pdev,
1067 						      htt_t2h_msg,
1068 						      peer_id, tid);
1069 			break;
1070 		}
1071 
1072 		ol_rx_in_order_indication_handler(pdev->txrx_pdev,
1073 						  htt_t2h_msg, peer_id,
1074 						  tid, offload_ind);
1075 		break;
1076 	}
1077 
1078 	default:
1079 		htt_t2h_lp_msg_handler(context, htt_t2h_msg, true);
1080 		return;
1081 
1082 	};
1083 
1084 	/* Free the indication buffer */
1085 	qdf_nbuf_free(htt_t2h_msg);
1086 }
1087 
1088 #ifdef WLAN_FEATURE_FASTPATH
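/*
 * HTT_T2H_MSG_BUF_REINIT() - re-initialize a fastpath T2H message buffer so
 * it can be reposted: push the data pointer back over the HTC header and
 * alignment padding, reset the nbuf fast-path fields, and sync the buffer
 * back to the device for DMA.
 */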
1089 #define HTT_T2H_MSG_BUF_REINIT(_buf, dev)				\
1090 	do {								\
1091 		qdf_nbuf_push_head(_buf, (HTC_HEADER_LEN) +		\
1092 				   HTC_HDR_ALIGNMENT_PADDING);		\
1093 		qdf_nbuf_init_fast((_buf));				\
1094 		qdf_mem_dma_sync_single_for_device(dev,			\
1095 					(QDF_NBUF_CB_PADDR(_buf)),	\
1096 					(skb_end_pointer(_buf) -	\
1097 					(_buf)->data),			\
1098 					DMA_FROM_DEVICE);		\
1099 	} while (0)
1100 
1101 /**
1102  * htt_t2h_msg_handler_fast() -  Fastpath specific message handler
1103  * @context: HTT context
1104  * @cmpl_msdus: netbuf completions
1105  * @num_cmpls: number of completions to be handled
1106  *
1107  * Return: None
1108  */
1109 void htt_t2h_msg_handler_fast(void *context, qdf_nbuf_t *cmpl_msdus,
1110 			      uint32_t num_cmpls)
1111 {
1112 	struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
1113 	qdf_nbuf_t htt_t2h_msg;
1114 	uint32_t *msg_word;
1115 	uint32_t i;
1116 	enum htt_t2h_msg_type msg_type;
1117 	uint32_t msg_len;
1118 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
1119 
1120 	for (i = 0; i < num_cmpls; i++) {
1121 		htt_t2h_msg = cmpl_msdus[i];
1122 		msg_len = qdf_nbuf_len(htt_t2h_msg);
1123 
1124 		/*
1125 		 * Move the data pointer to point to HTT header
1126 		 * past the HTC header + HTC header alignment padding
1127 		 */
1128 		qdf_nbuf_pull_head(htt_t2h_msg, HTC_HEADER_LEN +
1129 				   HTC_HDR_ALIGNMENT_PADDING);
1130 
1131 		/* confirm alignment */
1132 		HTT_ASSERT3((((unsigned long) qdf_nbuf_data(htt_t2h_msg)) & 0x3)
1133 			    == 0);
1134 
1135 		msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
1136 		msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
1137 
1138 		switch (msg_type) {
1139 		case HTT_T2H_MSG_TYPE_RX_IND:
1140 		{
1141 			unsigned int num_mpdu_ranges;
1142 			unsigned int num_msdu_bytes;
1143 			unsigned int calculated_msg_len;
1144 			unsigned int rx_mpdu_range_offset_bytes;
1145 			u_int16_t peer_id;
1146 			u_int8_t tid;
1147 			msg_len = qdf_nbuf_len(htt_t2h_msg);
1148 
1149 			peer_id = HTT_RX_IND_PEER_ID_GET(*msg_word);
1150 			tid = HTT_RX_IND_EXT_TID_GET(*msg_word);
1151 			if (tid >= OL_TXRX_NUM_EXT_TIDS) {
1152 				qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid tid %d\n",
1153 					tid);
1154 				WARN_ON(1);
1155 				break;
1156 			}
1157 			num_msdu_bytes =
1158 				HTT_RX_IND_FW_RX_DESC_BYTES_GET(
1159 				*(msg_word + 2 +
1160 				  HTT_RX_PPDU_DESC_SIZE32));
1161 			/*
1162 			 * 1 word for the message header,
1163 			 * HTT_RX_PPDU_DESC_SIZE32 words for the FW
1164 			 * rx PPDU desc.
1165 			 * 1 word to specify the number of MSDU bytes,
1166 			 * 1 word for every 4 MSDU bytes (round up),
1167 			 * 1 word for the MPDU range header
1168 			 */
1169 			rx_mpdu_range_offset_bytes =
1170 				(HTT_RX_IND_HDR_BYTES + num_msdu_bytes + 3);
1171 			if (qdf_unlikely(num_msdu_bytes >
1172 					 rx_mpdu_range_offset_bytes)) {
1173 				qdf_print("HTT_T2H_MSG_TYPE_RX_IND, %s %u\n",
1174 					  "invalid num_msdu_bytes",
1175 					  num_msdu_bytes);
1176 				WARN_ON(1);
1177 				break;
1178 			}
1179 			pdev->rx_mpdu_range_offset_words =
1180 				rx_mpdu_range_offset_bytes >> 2;
1181 			num_mpdu_ranges =
1182 				HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word
1183 								 + 1));
1184 			pdev->rx_ind_msdu_byte_idx = 0;
1185 			if (qdf_unlikely(rx_mpdu_range_offset_bytes >
1186 					 msg_len)) {
1187 				qdf_print("HTT_T2H_MSG_TYPE_RX_IND, %s %d\n",
1188 					  "invalid rx_mpdu_range_offset_words",
1189 					  pdev->rx_mpdu_range_offset_words);
1190 				WARN_ON(1);
1191 				break;
1192 			}
1193 			calculated_msg_len = rx_mpdu_range_offset_bytes +
1194 					     (num_mpdu_ranges *
1195 					     (int)sizeof(uint32_t));
1196 			/*
1197 			 * Check that the addition and multiplication
1198 			 * do not cause integer overflow
1199 			 */
1200 			if (qdf_unlikely(calculated_msg_len <
1201 					 rx_mpdu_range_offset_bytes)) {
1202 				qdf_print("HTT_T2H_MSG_TYPE_RX_IND, %s %u\n",
1203 					  "invalid num_mpdu_ranges",
1204 					  (num_mpdu_ranges *
1205 					   (int)sizeof(uint32_t)));
1206 				WARN_ON(1);
1207 				break;
1208 			}
1209 			if (qdf_unlikely(calculated_msg_len > msg_len)) {
1210 				qdf_print("HTT_T2H_MSG_TYPE_RX_IND, %s %u\n",
1211 					  "invalid offset_words + mpdu_ranges",
1212 					  calculated_msg_len);
1213 				WARN_ON(1);
1214 				break;
1215 			}
1216 			ol_rx_indication_handler(pdev->txrx_pdev, htt_t2h_msg,
1217 						 peer_id, tid, num_mpdu_ranges);
1218 			break;
1219 		}
1220 		case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
1221 		{
1222 			int num_msdus;
1223 			enum htt_tx_status status;
1224 
1225 			/* status - no enum translation needed */
1226 			status = HTT_TX_COMPL_IND_STATUS_GET(*msg_word);
1227 			num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
1228 
1229 			/*
1230 			 * Each MSDU descriptor ID occupies 2 bytes; the extra
1231 			 * 4 bytes account for the HTT message header.
1232 			 */
1233 			if ((num_msdus * HTT_TX_COMPL_BYTES_PER_MSDU_ID +
1234 				HTT_TX_COMPL_HEAD_SZ) > msg_len) {
1235 				qdf_print("%s: num_msdus(%d) is invalid,"
1236 					"adf_nbuf_len = %d\n",
1237 					__FUNCTION__,
1238 					num_msdus,
1239 					msg_len);
1240 				break;
1241 			}
1242 
1243 			if (num_msdus & 0x1) {
1244 				struct htt_tx_compl_ind_base *compl =
1245 					(void *)msg_word;
1246 
1247 				/*
1248 				 * Host CPU endianness can be different
1249 				 * from FW CPU. This can result in even
1250 				 * and odd MSDU IDs being switched. If
1251 				 * this happens, copy the switched final
1252 				 * odd MSDU ID from location
1253 				 * payload[size], to location
1254 				 * payload[size-1],where the message
1255 				 * handler function expects to find it
1256 				 */
1257 				if (compl->payload[num_msdus] !=
1258 				    HTT_TX_COMPL_INV_MSDU_ID) {
1259 					compl->payload[num_msdus - 1] =
1260 						compl->payload[num_msdus];
1261 				}
1262 			}
1263 			ol_tx_completion_handler(pdev->txrx_pdev, num_msdus,
1264 						 status, msg_word);
1265 
1266 			break;
1267 		}
1268 		case HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND:
1269 		{
1270 			struct htt_tx_offload_deliver_ind_hdr_t
1271 							*offload_deliver_msg;
1272 			uint8_t vdev_id;
1273 			struct ol_txrx_vdev_t *vdev;
1274 			bool is_pkt_during_roam = false;
1275 			struct ol_txrx_pdev_t *txrx_pdev = pdev->txrx_pdev;
1276 			struct ol_txrx_peer_t *peer;
1277 			uint8_t bssid[QDF_MAC_ADDR_SIZE];
1278 			uint32_t freq = 0;
1279 
1280 			if (!(ucfg_pkt_capture_get_pktcap_mode((void *)soc->psoc) &
1281 			      PKT_CAPTURE_MODE_DATA_ONLY))
1282 				break;
1283 
1284 			offload_deliver_msg =
1285 			(struct htt_tx_offload_deliver_ind_hdr_t *)msg_word;
1286 			is_pkt_during_roam =
1287 			(offload_deliver_msg->reserved_2 ? true : false);
1288 
1289 			if (qdf_unlikely(
1290 				!pdev->cfg.is_full_reorder_offload)) {
1291 				break;
1292 			}
1293 
1294 			/* Check whether FW sent this offload data during roaming */
1295 			if (is_pkt_during_roam) {
1296 				vdev_id = HTT_INVALID_VDEV;
1297 				freq =
1298 				(uint32_t)offload_deliver_msg->reserved_3;
1299 				htt_rx_mon_note_capture_channel(
1300 						pdev, cds_freq_to_chan(freq));
1301 			} else {
1302 				vdev_id = offload_deliver_msg->vdev_id;
1303 				vdev = (struct ol_txrx_vdev_t *)
1304 					ol_txrx_get_vdev_from_vdev_id(vdev_id);
1305 
1306 				if (vdev) {
1307 					qdf_spin_lock_bh(
1308 						&txrx_pdev->peer_ref_mutex);
1309 					peer = TAILQ_FIRST(&vdev->peer_list);
1310 					qdf_spin_unlock_bh(
1311 						&txrx_pdev->peer_ref_mutex);
1312 					if (peer) {
1313 						qdf_spin_lock_bh(
1314 							&peer->peer_info_lock);
1315 						qdf_mem_copy(
1316 							bssid,
1317 							&peer->mac_addr.raw,
1318 							QDF_MAC_ADDR_SIZE);
1319 						qdf_spin_unlock_bh(
1320 							&peer->peer_info_lock);
1321 					} else {
1322 						break;
1323 					}
1324 				} else {
1325 					break;
1326 				}
1327 			}
1328 			ucfg_pkt_capture_offload_deliver_indication_handler(
1329 							msg_word,
1330 							vdev_id, bssid, pdev);
1331 			break;
1332 		}
1333 		case HTT_T2H_MSG_TYPE_RX_PN_IND:
1334 		{
1335 			u_int16_t peer_id;
1336 			u_int8_t tid, pn_ie_cnt, *pn_ie = NULL;
1337 			int seq_num_start, seq_num_end;
1338 			int msg_len = qdf_nbuf_len(htt_t2h_msg);
1339 
1340 			if (msg_len < HTT_RX_PN_IND_BYTES) {
1341 				qdf_print("invalid nbuff len");
1342 				WARN_ON(1);
1343 				break;
1344 			}
1345 
1346 			/*First dword */
1347 			peer_id = HTT_RX_PN_IND_PEER_ID_GET(*msg_word);
1348 			tid = HTT_RX_PN_IND_EXT_TID_GET(*msg_word);
1349 
1350 			msg_word++;
1351 			/*Second dword */
1352 			seq_num_start =
1353 				HTT_RX_PN_IND_SEQ_NUM_START_GET(*msg_word);
1354 			seq_num_end =
1355 				HTT_RX_PN_IND_SEQ_NUM_END_GET(*msg_word);
1356 			pn_ie_cnt =
1357 				HTT_RX_PN_IND_PN_IE_CNT_GET(*msg_word);
1358 
1359 			if (msg_len - HTT_RX_PN_IND_BYTES <
1360 				pn_ie_cnt * sizeof(uint8_t)) {
1361 				qdf_print("invalid pn_ie len");
1362 				WARN_ON(1);
1363 				break;
1364 			}
1365 
1366 			msg_word++;
1367 			/*Third dword*/
1368 			if (pn_ie_cnt)
1369 				pn_ie = (u_int8_t *)msg_word;
1370 
1371 			ol_rx_pn_ind_handler(pdev->txrx_pdev, peer_id, tid,
1372 				seq_num_start, seq_num_end, pn_ie_cnt, pn_ie);
1373 
1374 			break;
1375 		}
1376 		case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
1377 		{
1378 			int num_msdus;
1379 
1380 			num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
1381 			/*
1382 			 * Each MSDU descriptor ID occupies 2 bytes; the extra
1383 			 * 4 bytes account for the HTT message header.
1384 			 */
1385 			if ((num_msdus * HTT_TX_COMPL_BYTES_PER_MSDU_ID +
1386 				HTT_TX_COMPL_HEAD_SZ) > msg_len) {
1387 				qdf_print("%s: num_msdus(%d) is invalid,"
1388 					"adf_nbuf_len = %d\n",
1389 					__FUNCTION__,
1390 					num_msdus,
1391 					msg_len);
1392 				break;
1393 			}
1394 
1395 			if (num_msdus & 0x1) {
1396 				struct htt_tx_compl_ind_base *compl =
1397 					(void *)msg_word;
1398 
1399 				/*
1400 				 * Host CPU endianness can be different
1401 				 * from FW CPU. This can result in
1402 				 * even and odd MSDU IDs being switched.
1403 				 * If this happens, copy the switched
1404 				 * final odd MSDU ID from location
1405 				 * payload[size], to location
1406 				 * payload[size-1], where the message
1407 				 * handler function expects to find it
1408 				 */
1409 				if (compl->payload[num_msdus] !=
1410 				    HTT_TX_COMPL_INV_MSDU_ID) {
1411 					compl->payload[num_msdus - 1] =
1412 					compl->payload[num_msdus];
1413 				}
1414 			}
1415 			ol_tx_inspect_handler(pdev->txrx_pdev,
1416 					      num_msdus, msg_word + 1);
1417 			break;
1418 		}
1419 		case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND:
1420 		{
1421 			u_int16_t peer_id;
1422 			u_int8_t tid;
1423 			u_int8_t offload_ind, frag_ind;
1424 
1425 			if (qdf_unlikely(
1426 				  !pdev->cfg.is_full_reorder_offload)) {
1427 				qdf_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND not supported when full reorder offload is disabled\n");
1428 				break;
1429 			}
1430 
1431 			if (qdf_unlikely(
1432 				pdev->txrx_pdev->cfg.is_high_latency)) {
1433 				qdf_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND not supported on high latency\n");
1434 				break;
1435 			}
1436 
1437 			peer_id = HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(
1438 							*msg_word);
1439 			tid = HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(
1440 							*msg_word);
1441 			offload_ind =
1442 				HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(
1443 							*msg_word);
1444 			frag_ind = HTT_RX_IN_ORD_PADDR_IND_FRAG_GET(
1445 							*msg_word);
1446 
1447 			if (qdf_unlikely(frag_ind)) {
1448 				ol_rx_frag_indication_handler(
1449 				pdev->txrx_pdev, htt_t2h_msg, peer_id,
1450 				tid);
1451 				break;
1452 			}
1453 
1454 			ol_rx_in_order_indication_handler(
1455 					pdev->txrx_pdev, htt_t2h_msg,
1456 					peer_id, tid, offload_ind);
1457 			break;
1458 		}
1459 		default:
1460 			htt_t2h_lp_msg_handler(context, htt_t2h_msg, false);
1461 			break;
1462 		};
1463 
1464 		/* Re-initialize the indication buffer */
1465 		HTT_T2H_MSG_BUF_REINIT(htt_t2h_msg, pdev->osdev);
1466 		qdf_nbuf_set_pktlen(htt_t2h_msg, 0);
1467 	}
1468 }
1469 #endif /* WLAN_FEATURE_FASTPATH */
1470 
1471 /*--- target->host HTT message Info Element access methods ------------------*/
1472 
1473 /*--- tx completion message ---*/
1474 
1475 uint16_t htt_tx_compl_desc_id(void *iterator, int num)
1476 {
1477 	/*
1478 	 * The MSDU IDs are packed, 2 per 32-bit word.
1479 	 * Iterate on them as an array of 16-bit elements.
1480 	 * This will work fine if the host endianness matches
1481 	 * the target endianness.
1482 	 * If the host endianness is opposite of the target's,
1483 	 * this iterator will produce descriptor IDs in a different
1484 	 * order than the target inserted them into the message -
1485 	 * if the target puts in [0, 1, 2, 3, ...] the host will
1486 	 * put out [1, 0, 3, 2, ...].
1487 	 * This is fine, except for the last ID if there are an
1488 	 * odd number of IDs.  But the TX_COMPL_IND handling code
1489 	 * in the htt_t2h_msg_handler already added a duplicate
1490 	 * of the final ID, if there were an odd number of IDs,
1491 	 * so this function can safely treat the IDs as an array
1492 	 * of 16-bit elements.
1493 	 */
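	/*
	 * Example (illustrative): if the target packed IDs 7 and 9 into one
	 * 32-bit word, nums 0 and 1 return 7 and 9 on a matching-endian
	 * host, or 9 and 7 on an opposite-endian host, as described above.
	 */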
1494 	return *(((uint16_t *) iterator) + num);
1495 }
1496 
1497 /*--- rx indication message ---*/
1498 
1499 int htt_rx_ind_flush(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
1500 {
1501 	uint32_t *msg_word;
1502 
1503 	msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
1504 	return HTT_RX_IND_FLUSH_VALID_GET(*msg_word);
1505 }
1506 
1507 void
1508 htt_rx_ind_flush_seq_num_range(htt_pdev_handle pdev,
1509 			       qdf_nbuf_t rx_ind_msg,
1510 			       unsigned int *seq_num_start,
1511 			       unsigned int *seq_num_end)
1512 {
1513 	uint32_t *msg_word;
1514 
1515 	msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
1516 	msg_word++;
1517 	*seq_num_start = HTT_RX_IND_FLUSH_SEQ_NUM_START_GET(*msg_word);
1518 	*seq_num_end = HTT_RX_IND_FLUSH_SEQ_NUM_END_GET(*msg_word);
1519 }
1520 
1521 int htt_rx_ind_release(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
1522 {
1523 	uint32_t *msg_word;
1524 
1525 	msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
1526 	return HTT_RX_IND_REL_VALID_GET(*msg_word);
1527 }
1528 
1529 void
1530 htt_rx_ind_release_seq_num_range(htt_pdev_handle pdev,
1531 				 qdf_nbuf_t rx_ind_msg,
1532 				 unsigned int *seq_num_start,
1533 				 unsigned int *seq_num_end)
1534 {
1535 	uint32_t *msg_word;
1536 
1537 	msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
1538 	msg_word++;
1539 	*seq_num_start = HTT_RX_IND_REL_SEQ_NUM_START_GET(*msg_word);
1540 	*seq_num_end = HTT_RX_IND_REL_SEQ_NUM_END_GET(*msg_word);
1541 }
1542 
1543 void
1544 htt_rx_ind_mpdu_range_info(struct htt_pdev_t *pdev,
1545 			   qdf_nbuf_t rx_ind_msg,
1546 			   int mpdu_range_num,
1547 			   enum htt_rx_status *status, int *mpdu_count)
1548 {
1549 	uint32_t *msg_word;
1550 
1551 	msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
1552 	msg_word += pdev->rx_mpdu_range_offset_words + mpdu_range_num;
1553 	*status = HTT_RX_IND_MPDU_STATUS_GET(*msg_word);
1554 	*mpdu_count = HTT_RX_IND_MPDU_COUNT_GET(*msg_word);
1555 }
1556 
1557 /**
1558  * htt_rx_ind_rssi_dbm() - Return the RSSI provided in a rx indication message.
1559  *
1560  * @pdev:       the HTT instance the rx data was received on
1561  * @rx_ind_msg: the netbuf containing the rx indication message
1562  *
1563  * Return the RSSI from an rx indication message, in dBm units.
1564  *
1565  * Return: RSSI in dBm, or HTT_RSSI_INVALID
1566  */
1567 int16_t htt_rx_ind_rssi_dbm(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
1568 {
1569 	int8_t rssi;
1570 	uint32_t *msg_word;
1571 
1572 	msg_word = (uint32_t *)
1573 		   (qdf_nbuf_data(rx_ind_msg) +
1574 		    HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
1575 
1576 	/* check if the RX_IND message contains valid rx PPDU start info */
1577 	if (!HTT_RX_IND_START_VALID_GET(*msg_word))
1578 		return HTT_RSSI_INVALID;
1579 
1580 	rssi = HTT_RX_IND_RSSI_CMB_GET(*msg_word);
1581 	return (HTT_TGT_RSSI_INVALID == rssi) ?
1582 	       HTT_RSSI_INVALID : rssi;
1583 }
1584 
1585 /**
1586  * htt_rx_ind_rssi_dbm_chain() - Return the RSSI for a chain provided in a rx
1587  *              indication message.
1588  * @pdev:       the HTT instance the rx data was received on
1589  * @rx_ind_msg: the netbuf containing the rx indication message
1590  * @chain:      the index of the chain (0-3)
1591  *
1592  * Return the RSSI for a chain from an rx indication message, in dBm units.
1593  *
1594  * Return: RSSI, or HTT_RSSI_INVALID
1595  */
1596 int16_t
1597 htt_rx_ind_rssi_dbm_chain(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg,
1598 		      int8_t chain)
1599 {
1600 	int8_t rssi;
1601 	uint32_t *msg_word;
1602 
1603 	if (chain < 0 || chain > 3)
1604 		return HTT_RSSI_INVALID;
1605 
1606 	msg_word = (uint32_t *)
1607 		(qdf_nbuf_data(rx_ind_msg) +
1608 		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
1609 
1610 	/* check if the RX_IND message contains valid rx PPDU start info */
1611 	if (!HTT_RX_IND_START_VALID_GET(*msg_word))
1612 		return HTT_RSSI_INVALID;
1613 
1614 	msg_word += 1 + chain;
1615 
1616 	rssi = HTT_RX_IND_RSSI_PRI20_GET(*msg_word);
1617 	return (HTT_TGT_RSSI_INVALID == rssi) ?
1618 		HTT_RSSI_INVALID :
1619 		rssi;
1620 }
1621 
1622 /**
1623  * htt_rx_ind_legacy_rate() - Return the data rate
1624  * @pdev:        the HTT instance the rx data was received on
1625  * @rx_ind_msg:  the netbuf containing the rx indication message
1626  * @legacy_rate: (output) the data rate
1627  *      The legacy_rate parameter's value depends on the
1628  *      legacy_rate_sel value.
1629  *      If legacy_rate_sel is 0:
1630  *              0x8: OFDM 48 Mbps
1631  *              0x9: OFDM 24 Mbps
1632  *              0xA: OFDM 12 Mbps
1633  *              0xB: OFDM 6 Mbps
1634  *              0xC: OFDM 54 Mbps
1635  *              0xD: OFDM 36 Mbps
1636  *              0xE: OFDM 18 Mbps
1637  *              0xF: OFDM 9 Mbps
1638  *      If legacy_rate_sel is 1:
1639  *              0x8: CCK 11 Mbps long preamble
1640  *              0x9: CCK 5.5 Mbps long preamble
1641  *              0xA: CCK 2 Mbps long preamble
1642  *              0xB: CCK 1 Mbps long preamble
1643  *              0xC: CCK 11 Mbps short preamble
1644  *              0xD: CCK 5.5 Mbps short preamble
1645  *              0xE: CCK 2 Mbps short preamble
1646  *      -1 on error.
1647  * @legacy_rate_sel: (output) 0 to indicate OFDM, 1 to indicate CCK.
1648  *      -1 on error.
1649  *
1650  * Return the data rate provided in a rx indication message.
1651  */
1652 void
1653 htt_rx_ind_legacy_rate(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg,
1654 		       uint8_t *legacy_rate, uint8_t *legacy_rate_sel)
1655 {
1656 	uint32_t *msg_word;
1657 
1658 	msg_word = (uint32_t *)
1659 		(qdf_nbuf_data(rx_ind_msg) +
1660 		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
1661 
1662 	/* check if the RX_IND message contains valid rx PPDU start info */
1663 	if (!HTT_RX_IND_START_VALID_GET(*msg_word)) {
1664 		*legacy_rate = -1;
1665 		*legacy_rate_sel = -1;
1666 		return;
1667 	}
1668 
1669 	*legacy_rate = HTT_RX_IND_LEGACY_RATE_GET(*msg_word);
1670 	*legacy_rate_sel = HTT_RX_IND_LEGACY_RATE_SEL_GET(*msg_word);
1671 }
1672 
1673 /**
1674  * htt_rx_ind_timestamp() - Return the timestamp
1675  * @pdev:                  the HTT instance the rx data was received on
1676  * @rx_ind_msg:            the netbuf containing the rx indication message
1677  * @timestamp_microsec:    (output) the timestamp to microsecond resolution.
1678  *                         -1 on error.
1679  * @timestamp_submicrosec: the submicrosecond portion of the
1680  *                         timestamp. -1 on error.
1681  *
1682  * Return the timestamp provided in a rx indication message.
1683  */
1684 void
1685 htt_rx_ind_timestamp(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg,
1686 		     uint32_t *timestamp_microsec,
1687 		     uint8_t *timestamp_submicrosec)
1688 {
1689 	uint32_t *msg_word;
1690 
1691 	msg_word = (uint32_t *)
1692 		(qdf_nbuf_data(rx_ind_msg) +
1693 		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
1694 
1695 	/* check if the RX_IND message contains valid rx PPDU start info */
1696 	if (!HTT_RX_IND_END_VALID_GET(*msg_word)) {
1697 		*timestamp_microsec = -1;
1698 		*timestamp_submicrosec = -1;
1699 		return;
1700 	}
1701 
1702 	*timestamp_microsec = *(msg_word + 6);
1703 	*timestamp_submicrosec =
1704 		HTT_RX_IND_TIMESTAMP_SUBMICROSEC_GET(*msg_word);
1705 }
1706 
1707 #define INVALID_TSF -1
1708 /**
1709  * htt_rx_ind_tsf32() - Return the TSF timestamp
1710  * @pdev:       the HTT instance the rx data was received on
1711  * @rx_ind_msg: the netbuf containing the rx indication message
1712  *
1713  * Return the TSF timestamp provided in a rx indication message.
1714  *
1715  * Return: TSF timestamp
1716  */
1717 uint32_t
1718 htt_rx_ind_tsf32(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
1719 {
1720 	uint32_t *msg_word;
1721 
1722 	msg_word = (uint32_t *)
1723 		(qdf_nbuf_data(rx_ind_msg) +
1724 		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
1725 
1726 	/* check if the RX_IND message contains valid rx PPDU start info */
1727 	if (!HTT_RX_IND_END_VALID_GET(*msg_word))
1728 		return INVALID_TSF;
1729 
1730 	return *(msg_word + 5);
1731 }
1732 
1733 /**
1734  * htt_rx_ind_ext_tid() - Return the extended traffic ID provided in a rx
1735  *			  indication message.
1736  * @pdev:       the HTT instance the rx data was received on
1737  * @rx_ind_msg: the netbuf containing the rx indication message
1738  *
1739  * Return the extended traffic ID in a rx indication message.
1740  *
1741  * Return: Extended TID
1742  */
1743 uint8_t
1744 htt_rx_ind_ext_tid(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
1745 {
1746 	uint32_t *msg_word;
1747 
1748 	msg_word = (uint32_t *)
1749 		(qdf_nbuf_data(rx_ind_msg));
1750 
1751 	return HTT_RX_IND_EXT_TID_GET(*msg_word);
1752 }
1753 
1754 /*--- stats confirmation message ---*/
1755 
1756 void
1757 htt_t2h_dbg_stats_hdr_parse(uint8_t *stats_info_list,
1758 			    enum htt_dbg_stats_type *type,
1759 			    enum htt_dbg_stats_status *status,
1760 			    int *length, uint8_t **stats_data)
1761 {
1762 	uint32_t *msg_word = (uint32_t *) stats_info_list;
1763 	*type = HTT_T2H_STATS_CONF_TLV_TYPE_GET(*msg_word);
1764 	*status = HTT_T2H_STATS_CONF_TLV_STATUS_GET(*msg_word);
1765 	*length = HTT_T2H_STATS_CONF_TLV_HDR_SIZE +     /* header length */
1766 		HTT_T2H_STATS_CONF_TLV_LENGTH_GET(*msg_word); /* data len */
1767 	*stats_data = stats_info_list + HTT_T2H_STATS_CONF_TLV_HDR_SIZE;
1768 }
1769 
1770 void
1771 htt_rx_frag_ind_flush_seq_num_range(htt_pdev_handle pdev,
1772 				    qdf_nbuf_t rx_frag_ind_msg,
1773 				    uint16_t *seq_num_start, uint16_t *seq_num_end)
1774 {
1775 	uint32_t *msg_word;
1776 
1777 	msg_word = (uint32_t *) qdf_nbuf_data(rx_frag_ind_msg);
1778 	msg_word++;
1779 	*seq_num_start = HTT_RX_FRAG_IND_FLUSH_SEQ_NUM_START_GET(*msg_word);
1780 	*seq_num_end = HTT_RX_FRAG_IND_FLUSH_SEQ_NUM_END_GET(*msg_word);
1781 }
1782